/external/mesa3d/src/gallium/drivers/nouveau/
D | nouveau_mm.c |
  53  mm_slab_alloc(struct mm_slab *slab)  in mm_slab_alloc() argument
  57  if (slab->free == 0)  in mm_slab_alloc()
  60  for (i = 0; i < (slab->count + 31) / 32; ++i) {  in mm_slab_alloc()
  61  b = ffs(slab->bits[i]) - 1;  in mm_slab_alloc()
  64  assert(n < slab->count);  in mm_slab_alloc()
  65  slab->free--;  in mm_slab_alloc()
  66  slab->bits[i] &= ~(1 << b);  in mm_slab_alloc()
  74  mm_slab_free(struct mm_slab *slab, int i)  in mm_slab_free() argument
  76  assert(i < slab->count);  in mm_slab_free()
  77  slab->bits[i / 32] |= 1 << (i % 32);  in mm_slab_free()
  [all …]
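The nouveau_mm.c hits above are the heart of a bitmap slab allocator: one bit per slot (set = free), ffs() to find the first free slot in a 32-bit word, and a clear/set to mark it busy or free. A minimal self-contained sketch of the same idea (slot count and struct layout are assumptions, not nouveau's real ones):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h> /* ffs() */

#define SLAB_COUNT 96 /* hypothetical slots per slab; multiple of 32 */

struct mm_slab_sketch {
   uint32_t bits[(SLAB_COUNT + 31) / 32]; /* 1 bit per slot; 1 = free */
   int count;                             /* total slots */
   int free;                              /* free slots remaining */
};

static int slab_alloc(struct mm_slab_sketch *slab)
{
   if (slab->free == 0)
      return -1;
   for (int i = 0; i < (slab->count + 31) / 32; ++i) {
      int b = ffs(slab->bits[i]) - 1; /* first free slot in this word, or -1 */
      if (b >= 0) {
         int n = i * 32 + b;
         assert(n < slab->count);
         slab->bits[i] &= ~(1u << b); /* mark slot busy */
         slab->free--;
         return n;
      }
   }
   return -1; /* unreachable while the free count is accurate */
}

static void slab_free(struct mm_slab_sketch *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1u << (i % 32); /* mark slot free again */
   slab->free++;
}

int main(void)
{
   struct mm_slab_sketch s = { .count = SLAB_COUNT, .free = SLAB_COUNT };
   for (int i = 0; i < (SLAB_COUNT + 31) / 32; ++i)
      s.bits[i] = ~0u; /* all slots start free */
   int a = slab_alloc(&s), b = slab_alloc(&s);
   printf("got slots %d and %d\n", a, b);
   slab_free(&s, a);
   printf("slot %d reused as %d\n", a, slab_alloc(&s));
   return 0;
}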
/external/mesa3d/src/gallium/auxiliary/pipebuffer/
D | pb_bufmgr_slab.c |
  62   struct pb_slab *slab;  member
  197  struct pb_slab *slab = buf->slab;  in pb_slab_buffer_destroy() local
  198  struct pb_slab_manager *mgr = slab->mgr;  in pb_slab_buffer_destroy()
  208  LIST_ADDTAIL(list, &slab->freeBuffers);  in pb_slab_buffer_destroy()
  209  slab->numFree++;  in pb_slab_buffer_destroy()
  211  if (slab->head.next == &slab->head)  in pb_slab_buffer_destroy()
  212  LIST_ADDTAIL(&slab->head, &mgr->slabs);  in pb_slab_buffer_destroy()
  215  if (slab->numFree == slab->numBuffers) {  in pb_slab_buffer_destroy()
  216  list = &slab->head;  in pb_slab_buffer_destroy()
  218  pb_reference(&slab->bo, NULL);  in pb_slab_buffer_destroy()
  [all …]
D | pb_slab.c |
  56   struct pb_slab *slab = entry->slab;  in pb_slab_reclaim() local
  59   LIST_ADD(&entry->head, &slab->free);  in pb_slab_reclaim()
  60   slab->num_free++;  in pb_slab_reclaim()
  63   if (!slab->head.next) {  in pb_slab_reclaim()
  65   LIST_ADDTAIL(&slab->head, &group->slabs);  in pb_slab_reclaim()
  68   if (slab->num_free >= slab->num_entries) {  in pb_slab_reclaim()
  69   LIST_DEL(&slab->head);  in pb_slab_reclaim()
  70   slabs->slab_free(slabs->priv, slab);  in pb_slab_reclaim()
  103  struct pb_slab *slab;  in pb_slab_alloc() local
  123  slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);  in pb_slab_alloc()
  [all …]
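Both pipebuffer managers share one lifecycle for a reclaimed entry: bump the slab's free count, relink the slab into the manager's list if this was its first free entry, and hand the slab back to its backend once every entry is free. A reduced sketch of that bookkeeping (names, and the boolean standing in for the list linkage, are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct slab_sketch {
   int num_free;
   int num_entries;
   bool in_group_list; /* stands in for slab->head being linked */
};

/* Called when one entry of 'slab' becomes reclaimable again. */
static void slab_reclaim_entry(struct slab_sketch *slab)
{
   slab->num_free++;

   /* First free entry: make the slab visible to the allocator again. */
   if (!slab->in_group_list) {
      slab->in_group_list = true;
      printf("slab relinked into group list\n");
   }

   /* Entirely free: unlink and destroy the slab. */
   if (slab->num_free >= slab->num_entries) {
      slab->in_group_list = false;
      printf("slab fully free, destroying\n");
      /* slabs->slab_free(slabs->priv, slab) would run here */
   }
}

int main(void)
{
   struct slab_sketch s = { .num_free = 0, .num_entries = 2 };
   slab_reclaim_entry(&s);
   slab_reclaim_entry(&s);
   return 0;
}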
/external/jemalloc_new/include/jemalloc/internal/
D | arena_inlines_b.h |
  23   if (unlikely(!alloc_ctx->slab)) {  in arena_prof_tctx_get()
  43   if (unlikely(!alloc_ctx->slab)) {  in arena_prof_tctx_set()
  168  bool slab;  in arena_dalloc_no_tcache() local
  170  true, &szind, &slab);  in arena_dalloc_no_tcache()
  177  assert(slab == extent_slab_get(extent));  in arena_dalloc_no_tcache()
  180  if (likely(slab)) {  in arena_dalloc_no_tcache()
  201  bool slab;  in arena_dalloc() local
  205  slab = alloc_ctx->slab;  in arena_dalloc()
  210  (uintptr_t)ptr, true, &szind, &slab);  in arena_dalloc()
  219  assert(slab == extent_slab_get(extent));  in arena_dalloc()
  [all …]
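The arena_dalloc hits show what the slab flag is for: deallocation reads (szind, slab) for the pointer and branches to the small-object bin path when slab is true, to the large-extent path otherwise. A trimmed sketch of that dispatch (the lookup is a stand-in, not jemalloc's rtree):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for rtree_szind_slab_read(): report a size-class index and
 * whether the pointer lives in a small-object slab. */
static void szind_slab_read(const void *ptr, unsigned *szind, bool *slab)
{
   (void)ptr;
   *szind = 3;   /* pretend: small size class */
   *slab = true;
}

static void dalloc_sketch(void *ptr)
{
   unsigned szind;
   bool slab;
   szind_slab_read(ptr, &szind, &slab);
   if (slab) {
      /* small object: return the region to its bin's slab */
      printf("bin free, szind=%u\n", szind);
   } else {
      /* large object: free the whole extent */
      printf("large free, szind=%u\n", szind);
   }
}

int main(void)
{
   int x;
   dalloc_sketch(&x);
   return 0;
}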
D | rtree.h |
  281  rtree_leaf_elm_t *elm, bool slab) {  in rtree_leaf_elm_slab_write() argument
  287  (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);  in rtree_leaf_elm_slab_write()
  290  atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);  in rtree_leaf_elm_slab_write()
  296  extent_t *extent, szind_t szind, bool slab) {  in rtree_leaf_elm_write() argument
  300  ((uintptr_t)slab);  in rtree_leaf_elm_write()
  303  rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);  in rtree_leaf_elm_write()
  315  rtree_leaf_elm_t *elm, szind_t szind, bool slab) {  in rtree_leaf_elm_szind_slab_update() argument
  316  assert(!slab || szind < NBINS);  in rtree_leaf_elm_szind_slab_update()
  322  rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);  in rtree_leaf_elm_szind_slab_update()
  397  extent_t *extent, szind_t szind, bool slab) {  in rtree_write() argument
  [all …]
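rtree.h packs the extent pointer and the slab flag into a single word: the pointer is masked to its low LG_VADDR bits and, because extents are page-aligned, bit 0 is free to carry the flag. A standalone sketch of the tag-bit trick (assumes a 64-bit target; the LG_VADDR value and names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LG_VADDR 48 /* assumed usable virtual-address bits */

/* Pack a (sufficiently aligned) pointer and a boolean into one word. */
static uintptr_t pack_ptr_slab(void *ptr, bool slab)
{
   uintptr_t bits = (uintptr_t)ptr & (((uintptr_t)0x1 << LG_VADDR) - 1);
   assert((bits & 0x1) == 0); /* alignment keeps bit 0 free for the flag */
   return bits | (uintptr_t)slab;
}

static void *unpack_ptr(uintptr_t word) { return (void *)(word & ~(uintptr_t)0x1); }
static bool unpack_slab(uintptr_t word) { return word & 0x1; }

int main(void)
{
   void *extent = aligned_alloc(4096, 4096); /* page-aligned, like an extent */
   uintptr_t word = pack_ptr_slab(extent, true);
   printf("ptr ok: %d, slab: %d\n", unpack_ptr(word) == extent, unpack_slab(word));
   free(extent);
   return 0;
}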
/external/jemalloc_new/src/
D | arena.c |
  56   static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
  58   static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
  224  arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {  in arena_slab_reg_alloc() argument
  226  arena_slab_data_t *slab_data = extent_slab_data_get(slab);  in arena_slab_reg_alloc()
  229  assert(extent_nfree_get(slab) > 0);  in arena_slab_reg_alloc()
  233  ret = (void *)((uintptr_t)extent_addr_get(slab) +  in arena_slab_reg_alloc()
  235  extent_nfree_dec(slab);  in arena_slab_reg_alloc()
  243  arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {  in arena_slab_regind() argument
  247  assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));  in arena_slab_regind()
  248  assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));  in arena_slab_regind()
  [all …]
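arena_slab_regind() maps an interior pointer back to its region index within the slab, i.e. (ptr - slab_base) / region_size; jemalloc itself replaces the division with precomputed per-bin constants. A sketch of the relation (names are illustrative), which the unit test under test/unit/slab.c below exercises:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static size_t slab_regind(uintptr_t slab_base, size_t reg_size, const void *ptr)
{
   uintptr_t diff = (uintptr_t)ptr - slab_base;
   assert(diff % reg_size == 0); /* ptr must point at a region start */
   return diff / reg_size;
}

int main(void)
{
   enum { REG_SIZE = 64, NREGS = 16 };
   static unsigned char slab[REG_SIZE * NREGS];
   const void *reg5 = slab + 5 * REG_SIZE;
   printf("regind = %zu\n", slab_regind((uintptr_t)slab, REG_SIZE, reg5)); /* 5 */
   return 0;
}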
D | extent.c |
  111  size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
  520  size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {  in extents_alloc() argument
  527  new_addr, size, pad, alignment, slab, szind, zero, commit, false);  in extents_alloc()
  695  rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {  in extent_rtree_write_acquired() argument
  696  rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);  in extent_rtree_write_acquired()
  699  slab);  in extent_rtree_write_acquired()
  771  bool slab = extent_slab_get(extent);  in extent_register_impl() local
  772  extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);  in extent_register_impl()
  773  if (slab) {  in extent_register_impl()
  866  void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,  in extent_recycle_extract() argument
  [all …]
D | android_je_iterate.c |
  44  bool slab;  in je_iterate() local
  45  rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, ptr, true, &szind, &slab);  in je_iterate()
  46  if (slab) {  in je_iterate()
/external/mesa3d/src/gallium/winsys/radeon/drm/
D | radeon_drm_bo.c |
  81   for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {  in radeon_bo_is_busy()
  82   if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {  in radeon_bo_is_busy()
  86   radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);  in radeon_bo_is_busy()
  88   memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],  in radeon_bo_is_busy()
  89   (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));  in radeon_bo_is_busy()
  90   bo->u.slab.num_fences -= num_idle;  in radeon_bo_is_busy()
  111  while (bo->u.slab.num_fences) {  in radeon_bo_wait_idle()
  113  radeon_bo_reference(&fence, bo->u.slab.fences[0]);  in radeon_bo_wait_idle()
  120  if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {  in radeon_bo_wait_idle()
  121  radeon_bo_reference(&bo->u.slab.fences[0], NULL);  in radeon_bo_wait_idle()
  [all …]
D | radeon_drm_cs.c |
  298  real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real);  in radeon_lookup_or_add_slab_buffer()
  322  item->u.slab.real_idx = real_idx;  in radeon_lookup_or_add_slab_buffer()
  359  index = cs->csc->slab_buffers[index].u.slab.real_idx;  in radeon_drm_cs_add_buffer()
  510  for (unsigned src = 0; src < bo->u.slab.num_fences; ++src) {  in radeon_bo_slab_fence()
  511  if (bo->u.slab.fences[src]->num_cs_references) {  in radeon_bo_slab_fence()
  512  bo->u.slab.fences[dst] = bo->u.slab.fences[src];  in radeon_bo_slab_fence()
  515  radeon_bo_reference(&bo->u.slab.fences[src], NULL);  in radeon_bo_slab_fence()
  518  bo->u.slab.num_fences = dst;  in radeon_bo_slab_fence()
  521  if (bo->u.slab.num_fences >= bo->u.slab.max_fences) {  in radeon_bo_slab_fence()
  522  unsigned new_max_fences = bo->u.slab.max_fences + 1;  in radeon_bo_slab_fence()
  [all …]
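radeon_bo_slab_fence() compacts a slab buffer's fence array in place, keeping only fences that a command stream still references; radeon_bo_is_busy() above similarly memmoves the still-busy tail over released leading fences. A sketch of the compaction (the fence type and names are stand-ins):

#include <stdio.h>

struct fence_sketch { int num_cs_references; };

static unsigned compact_fences(struct fence_sketch *fences, unsigned num)
{
   unsigned dst = 0;
   for (unsigned src = 0; src < num; ++src) {
      if (fences[src].num_cs_references)
         fences[dst++] = fences[src]; /* still busy: keep, packed forward */
      /* else: radeon_bo_reference(&fences[src], NULL) would drop it */
   }
   return dst; /* new num_fences */
}

int main(void)
{
   struct fence_sketch f[] = { {0}, {2}, {0}, {1} };
   unsigned n = compact_fences(f, 4);
   printf("kept %u fences\n", n); /* 2 */
   return 0;
}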
/external/jemalloc_new/test/unit/
D | slab.c |
  8   extent_t slab;  in TEST_BEGIN() local
  10  extent_init(&slab, NULL, mallocx(bin_info->slab_size,  in TEST_BEGIN()
  13  assert_ptr_not_null(extent_addr_get(&slab),  in TEST_BEGIN()
  16  void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +  in TEST_BEGIN()
  18  assert_zu_eq(arena_slab_regind(&slab, binind, reg),  in TEST_BEGIN()
  23  free(extent_addr_get(&slab));  in TEST_BEGIN()
/external/mesa3d/src/amd/vulkan/
D | radv_shader.c |
  314  list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {  in radv_alloc_shader_memory()
  316  list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {  in radv_alloc_shader_memory()
  318  shader->bo = slab->bo;  in radv_alloc_shader_memory()
  322  return slab->ptr + offset;  in radv_alloc_shader_memory()
  326  if (slab->size - offset >= shader->code_size) {  in radv_alloc_shader_memory()
  327  shader->bo = slab->bo;  in radv_alloc_shader_memory()
  329  list_addtail(&shader->slab_list, &slab->shaders);  in radv_alloc_shader_memory()
  331  return slab->ptr + offset;  in radv_alloc_shader_memory()
  336  struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));  in radv_alloc_shader_memory() local
  338  slab->size = 256 * 1024;  in radv_alloc_shader_memory()
  [all …]
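radv_alloc_shader_memory() is a first-fit walk: for each slab, scan the shaders ordered by offset, take the first gap large enough, fall back to the slab tail, and otherwise allocate a fresh 256 KiB slab. A sketch of the placement logic over a sorted offset list (sizes and names are illustrative, not radv's real structures):

#include <stdint.h>
#include <stdio.h>

#define SLAB_SIZE (256 * 1024)

/* offsets[] must be sorted ascending; returns a placement or -1. */
static int64_t first_fit(const uint64_t *offsets, const uint64_t *sizes,
                         unsigned n, uint64_t want)
{
   uint64_t offset = 0;
   for (unsigned i = 0; i < n; ++i) {
      if (offsets[i] - offset >= want)
         return (int64_t)offset;       /* hole before allocation i */
      offset = offsets[i] + sizes[i];  /* skip past allocation i */
   }
   if (SLAB_SIZE - offset >= want)
      return (int64_t)offset;          /* tail space */
   return -1;                          /* caller would allocate a new slab */
}

int main(void)
{
   uint64_t offs[] = { 0, 8192 }, sz[] = { 4096, 4096 };
   printf("placed at %lld\n", (long long)first_fit(offs, sz, 2, 4096)); /* 4096 */
   return 0;
}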
/external/mesa3d/src/gallium/winsys/amdgpu/drm/
D | amdgpu_bo.c |
  308  real = bo->u.slab.real;  in amdgpu_bo_map()
  341  real = bo->bo ? bo : bo->u.slab.real;  in amdgpu_bo_unmap()
  501  bo = container_of(entry, bo, u.slab.entry);  in amdgpu_bo_can_reclaim_slab()
  512  pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);  in amdgpu_bo_slab_destroy()
  525  struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);  in amdgpu_bo_slab_alloc() local
  530  if (!slab)  in amdgpu_bo_slab_alloc()
  534  slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,  in amdgpu_bo_slab_alloc()
  537  if (!slab->buffer)  in amdgpu_bo_slab_alloc()
  540  assert(slab->buffer->bo);  in amdgpu_bo_slab_alloc()
  542  slab->base.num_entries = slab->buffer->base.size / entry_size;  in amdgpu_bo_slab_alloc()
  [all …]
D | amdgpu_bo.h |
  73   } slab;  member
  132  void amdgpu_bo_slab_free(void *priv, struct pb_slab *slab);
  141  struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)  in amdgpu_slab() argument
  143  return (struct amdgpu_slab *)slab;  in amdgpu_slab()
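amdgpu_slab() is the usual embed-and-downcast pattern: struct amdgpu_slab places the generic pb_slab first (as 'base'), so a pb_slab pointer handed out by the slab framework casts back to the wrapper; amdgpu_bo.c's container_of() hit is the same idea generalized to non-first members. A sketch (struct contents are placeholders):

#include <stdio.h>

struct pb_slab_sketch { unsigned num_entries; };

struct amdgpu_slab_sketch {
   struct pb_slab_sketch base; /* must stay first for the cast to be valid */
   int buffer_id;
};

static struct amdgpu_slab_sketch *amdgpu_slab_sketch(struct pb_slab_sketch *slab)
{
   return (struct amdgpu_slab_sketch *)slab;
}

int main(void)
{
   struct amdgpu_slab_sketch s = { .base = { .num_entries = 64 }, .buffer_id = 7 };
   struct pb_slab_sketch *generic = &s.base; /* what the framework sees */
   printf("buffer_id = %d\n", amdgpu_slab_sketch(generic)->buffer_id);
   return 0;
}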
/external/syzkaller/pkg/report/testdata/linux/guilty/
D | 19 |
  10  hardirqs last enabled at (331): [<00000000eed64b41>] slab_alloc mm/slab.c:3378 [inline]
  11  hardirqs last enabled at (331): [<00000000eed64b41>] __do_kmalloc mm/slab.c:3709 [inline]
  12  hardirqs last enabled at (331): [<00000000eed64b41>] __kmalloc+0x23a/0x760 mm/slab.c:3720
  13  hardirqs last disabled at (332): [<00000000f3407c2d>] kfree+0x6a/0x250 mm/slab.c:3800
  27  RIP: 0010:virt_to_cache mm/slab.c:400 [inline]
  28  RIP: 0010:kfree+0xb2/0x250 mm/slab.c:3802
D | 18 |
  4   IP: virt_to_cache mm/slab.c:400 [inline]
  5   IP: kfree+0xb2/0x250 mm/slab.c:3802
  15  RIP: 0010:virt_to_cache mm/slab.c:400 [inline]
  16  RIP: 0010:kfree+0xb2/0x250 mm/slab.c:3802
D | 29 |
  44  __do_kmalloc_node mm/slab.c:3672 [inline]
  45  __kmalloc_node+0x47/0x70 mm/slab.c:3679
  46  kmalloc_node include/linux/slab.h:541 [inline]
  66  __cache_free mm/slab.c:3488 [inline]
  67  kfree+0xd6/0x260 mm/slab.c:3803
  85  flags: 0x2fffc0000008100(slab|head)
/external/skqp/src/compute/hs/vk/bench/
D | main.c |
  79    uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32() local
  84    memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  88    vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u32()
  104   uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64() local
  109   memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
  113   vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u64()
  533   uint32_t const slab_size = hs_target->config.slab.height << hs_target->config.slab.width_log2;  in main()
  1078  1u<<hs_target->config.slab.width_log2,  in main()
  1079  hs_target->config.slab.height,  in main()
  1092  hs_debug_u32(1u<<hs_target->config.slab.width_log2,  in main()
  [all …]
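The hs_transpose_slabs_* hits here (and in the skia and cl copies of this benchmark below) all do the same thing: copy one slab into a scratch buffer, then write it back transposed, turning the slab's row-major layout into column-major linear order. A sketch with small assumed dimensions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HS_WIDTH  4  /* e.g. 1u << config.slab.width_log2 */
#define HS_HEIGHT 3  /* e.g. config.slab.height */

static void transpose_slab_u32(uint32_t *vout_h)
{
   uint32_t slab[HS_WIDTH * HS_HEIGHT]; /* scratch copy of one slab */
   memcpy(slab, vout_h, sizeof(slab));
   for (uint32_t row = 0; row < HS_HEIGHT; ++row)
      for (uint32_t col = 0; col < HS_WIDTH; ++col)
         vout_h[col * HS_HEIGHT + row] = slab[row * HS_WIDTH + col];
}

int main(void)
{
   uint32_t v[HS_WIDTH * HS_HEIGHT];
   for (uint32_t i = 0; i < HS_WIDTH * HS_HEIGHT; ++i)
      v[i] = i;
   transpose_slab_u32(v);
   for (uint32_t i = 0; i < HS_WIDTH * HS_HEIGHT; ++i)
      printf("%u ", v[i]); /* 0 4 8 1 5 9 2 6 10 3 7 11 */
   printf("\n");
   return 0;
}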
/external/skia/src/compute/hs/vk/bench/
D | main.c |
  79    uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32() local
  84    memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  88    vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u32()
  104   uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64() local
  109   memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
  113   vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u64()
  533   uint32_t const slab_size = hs_target->config.slab.height << hs_target->config.slab.width_log2;  in main()
  1078  1u<<hs_target->config.slab.width_log2,  in main()
  1079  hs_target->config.slab.height,  in main()
  1092  hs_debug_u32(1u<<hs_target->config.slab.width_log2,  in main()
  [all …]
/external/syzkaller/pkg/report/testdata/linux/report/
D | 15 |
  1  TITLE: KASAN: slab-out-of-bounds Read in corrupted
  5  [ 1722.511384] BUG: KASAN: slab-out-of-bounds in memcpy+0x1d/0x40 at addr ffff88003a6bd110
  7  [ 1722.511384] BUG: KASAN: slab-out-of-bounds in memcpy+0x1d/0x40 at addr ffff88003a6bd110
/external/syzkaller/pkg/report/testdata/akaros/report/
D | 0 |
  1  TITLE: kernel panic: [German Accent]: OOM for a small slab growth!!!
  4  / $ kernel panic at kern/src/slab.c:518, from core 1: [German Accent]: OOM for a small slab growth!…
/external/skia/src/compute/hs/cl/bench/
D | main.c |
  119  uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32() local
  124  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  128  vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u32()
  144  uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64() local
  149  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
  153  vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u64()
  693  uint32_t const kpb = hs_target->config.slab.height << hs_target->config.slab.width_log2;  in main()
  750  1 << hs_target->config.slab.width_log2,  in main()
  751  hs_target->config.slab.height,  in main()
/external/skqp/src/compute/hs/cl/bench/
D | main.c |
  119  uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32() local
  124  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  128  vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u32()
  144  uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64() local
  149  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
  153  vout_h[col * hs_height + row] = slab[row * hs_width + col];  in hs_transpose_slabs_u64()
  693  uint32_t const kpb = hs_target->config.slab.height << hs_target->config.slab.width_log2;  in main()
  750  1 << hs_target->config.slab.width_log2,  in main()
  751  hs_target->config.slab.height,  in main()
/external/google-fonts/zilla-slab/
D | METADATA |
  4   name: "zilla-slab"
  7   …"A contemporary slab serif, based on Typotheque's Tesla, it is constructed with smooth curves and …
  12  value: "https://github.com/mozilla/zilla-slab"
/external/swiftshader/third_party/subzero/docs/
D | ALLOCATION.rst |
  66   this cheap, the Cfg includes a slab allocator from which these objects are
  70   providing the container with an allocator that uses the Cfg-local slab
  72   store a pointer to the slab allocator in thread-local storage (TLS). This is
  91   This requires maintaining the proper slab allocator pointer in TLS.
  94   slab allocator into its own TLS. This is used as the Cfg is built within the
  99   When the translation thread grabs a new Cfg pointer, it installs the Cfg's slab
  101  assembly buffer, it must take care not to use the Cfg's slab allocator. If
  102  there is a slab allocator for the assembler buffer, a pointer to it can also be
  106  the Cfg's slab allocator, and clears the allocator pointer from its TLS.
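ALLOCATION.rst describes the pattern directly: each Cfg owns a slab allocator, and the thread translating that Cfg installs a pointer to it in thread-local storage so allocation sites can reach it without threading it through every call, clearing it when done. A minimal sketch of that TLS handoff (names are illustrative, not Subzero's real API):

#include <stdio.h>

struct slab_allocator_sketch { const char *owner; };

/* Per-thread pointer to the currently installed Cfg-local allocator. */
static _Thread_local struct slab_allocator_sketch *tls_allocator;

static void cfg_install(struct slab_allocator_sketch *a) { tls_allocator = a; }
static void cfg_uninstall(void) { tls_allocator = NULL; } /* avoid stale use */

static void allocate_node(void)
{
   /* A container allocating a Cfg-local object looks up TLS. */
   printf("allocating from %s's slab\n", tls_allocator->owner);
}

int main(void)
{
   struct slab_allocator_sketch cfg_a = { "cfg_a" };
   cfg_install(&cfg_a);   /* translation thread grabs the Cfg */
   allocate_node();
   cfg_uninstall();       /* done with this Cfg: clear the pointer */
   return 0;
}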