/external/jemalloc_new/src/ |
D | bin.c |
    8  #define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \  [argument]
    9      {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
   10  #define BIN_INFO_bin_no(reg_size, slab_size, nregs)  [argument]
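The BIN_INFO_bin_yes initializer above lines up one-to-one with the bin_info fields declared in bin.h (see the bin.h entry further down, line 39). Below is a minimal sketch of that layout; bitmap_info_t is replaced by a placeholder, the numbers are purely illustrative, and the real bin_info_t carries further members.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder for jemalloc's internal bitmap_info_t (bitmap.h). */
    typedef struct { size_t nbits; } bitmap_info_sketch_t;

    /* Field order inferred from the initializer above:
     * {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}. */
    typedef struct {
        size_t               reg_size;    /* bytes per region served by the bin */
        size_t               slab_size;   /* bytes backing one slab of the bin  */
        uint32_t             nregs;       /* regions carved out of each slab    */
        bitmap_info_sketch_t bitmap_info; /* tracks which regions are in use    */
    } bin_info_sketch_t;

    int main(void) {
        /* Illustrative 8-byte bin with a one-page (4 KiB) slab. */
        bin_info_sketch_t b = {8, 4096, 512, {512}};
        printf("regions per slab: %zu\n", b.slab_size / b.reg_size); /* 512 */
        return 0;
    }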
|
D | stats.c |
  294  size_t reg_size, slab_size, curregs;  in stats_arena_bins_print()  [local]
  312  CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);  in stats_arena_bins_print()
  389  col_pgs.size_val = slab_size / page;  in stats_arena_bins_print()
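stats.c reads the slab size through the internal CTL_M2_GET() macro; the same value is exposed publicly as the arenas.bin.<i>.slab_size mallctl (see the ChangeLog entry at the end of this list). A minimal read through the public API, assuming an unprefixed jemalloc build; bin index 0 is arbitrary.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t reg_size, slab_size;
        size_t sz = sizeof(size_t);

        /* Public counterparts of the CTL_M2_GET() reads in stats.c. */
        if (mallctl("arenas.bin.0.size", &reg_size, &sz, NULL, 0) != 0 ||
            mallctl("arenas.bin.0.slab_size", &slab_size, &sz, NULL, 0) != 0) {
            fprintf(stderr, "mallctl failed\n");
            return 1;
        }
        printf("bin 0: reg_size=%zu slab_size=%zu\n", reg_size, slab_size);
        return 0;
    }

Link with -ljemalloc; builds configured with a symbol prefix expose the same call as je_mallctl() instead.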
|
D | arena.c |
 1104  bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);  in arena_slab_alloc_hard()
 1108  bin_info->slab_size);  in arena_slab_alloc_hard()
 1125  &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,  in arena_slab_alloc()
 1129  &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,  in arena_slab_alloc()
|
D | ctl.c |
 2432  CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)  in CTL_RO_NL_GEN()
|
/external/jemalloc_new/test/unit/ |
D | slab.c |
   10  extent_init(&slab, NULL, mallocx(bin_info->slab_size,  in TEST_BEGIN()
   11      MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,  in TEST_BEGIN()
|
D | mallctl.c |
  702  TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,  in TEST_BEGIN()
  703      bin_infos[0].slab_size);  in TEST_BEGIN()
|
/external/skqp/src/compute/hs/cuda/bench/ |
D | main.c |
  146  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u32()  [local]
  147  uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32()
  152  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  171  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u64()  [local]
  172  uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64()
  177  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
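All three HotSort benches (CUDA here, OpenCL and Vulkan below) stage one slab at a time into a scratch buffer of sizeof(uint32_t) * hs_words * slab_keys bytes before transposing it. A condensed sketch of that staging step, with plain malloc() standing in for ALLOCA_MACRO() and made-up hs_words/slab_keys values.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        /* Illustrative values only; the bench gets these from its HotSort
         * configuration. */
        uint32_t const hs_words  = 2;    /* 32-bit words per key (2 => 64-bit keys) */
        uint32_t const slab_keys = 256;  /* keys per slab                           */

        /* Mirrors lines 146/171 above: bytes needed to hold one slab of keys. */
        size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;

        uint32_t *vout_h = calloc(1, slab_size); /* stand-in for the sorted output */
        uint32_t *slab   = malloc(slab_size);    /* bench uses ALLOCA_MACRO() here */

        memcpy(slab, vout_h, slab_size);         /* copy the slab out for transpose */

        free(slab);
        free(vout_h);
        return 0;
    }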
|
/external/skqp/src/compute/hs/cl/bench/ |
D | main.c |
  118  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u32()  [local]
  119  uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32()
  124  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  143  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u64()  [local]
  144  uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64()
  149  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
|
/external/skqp/src/compute/hs/vk/bench/ |
D | main.c |
   78  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u32()  [local]
   79  uint32_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u32()
   84  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u32()
  103  size_t const slab_size = sizeof(uint32_t) * hs_words * slab_keys;  in hs_transpose_slabs_u64()  [local]
  104  uint64_t * const slab = ALLOCA_MACRO(slab_size);  in hs_transpose_slabs_u64()
  109  memcpy(slab,vout_h,slab_size);  in hs_transpose_slabs_u64()
  533  uint32_t const slab_size = hs_target->config.slab.height << hs_target->config.slab.width_log2;  in main()  [local]
  535  uint32_t const count_lo = (argc <= 4) ? slab_size : strtoul(argv[ 4],NULL,0);  in main()
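The Vulkan bench additionally treats the slab's key capacity, config.slab.height << config.slab.width_log2, as the default lower bound of the sort-size sweep when no count is passed on the command line. A small sketch of that default with hypothetical slab geometry.

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    int main(int argc, char *argv[]) {
        /* Hypothetical slab geometry: 16 rows of 2^5 = 32 lanes. */
        uint32_t const slab_height     = 16;
        uint32_t const slab_width_log2 = 5;

        /* Keys per slab, mirroring line 533 above. */
        uint32_t const slab_size = slab_height << slab_width_log2;

        /* Fall back to one full slab when argv[4] is absent (line 535). */
        uint32_t const count_lo = (argc <= 4)
            ? slab_size
            : (uint32_t)strtoul(argv[4], NULL, 0);

        printf("count_lo = %u\n", count_lo);
        return 0;
    }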
|
/external/jemalloc_new/include/jemalloc/internal/ |
D | bin.h |
   39  size_t slab_size;  [member]
|
D | size_classes.sh |
   64  slab_size() {  [function]
  131  slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
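size_classes.sh computes, at configure time, how many pages should back a slab for each small size class and returns the result in slab_size_pgs. Below is a C sketch of one natural rule for that choice, the smallest whole number of pages whose byte count is an exact multiple of the region size; it illustrates the idea rather than transcribing the script's exact policy.

    #include <stddef.h>
    #include <stdio.h>

    /* Smallest page count such that a slab holds a whole number of regions
     * with no leftover bytes.  A sketch of the idea behind slab_size() in
     * size_classes.sh, not a transcription of the script. */
    static size_t slab_size_pgs(size_t page, size_t reg_size) {
        size_t pgs = 1;
        while ((pgs * page) % reg_size != 0) {
            pgs++;
        }
        return pgs;
    }

    int main(void) {
        /* 4 KiB pages, 10 KiB regions -> 5 pages (20 KiB), two regions per slab. */
        printf("%zu pages\n", slab_size_pgs(4096, 10240));
        return 0;
    }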
|
/external/mesa3d/src/gallium/winsys/amdgpu/drm/ |
D | amdgpu_bo.c |
  674  unsigned slab_size = 0;  in amdgpu_bo_slab_alloc()  [local]
  691  slab_size = max_entry_size * 2;  in amdgpu_bo_slab_alloc()
  697  slab_size < ws->info.pte_fragment_size)  in amdgpu_bo_slab_alloc()
  698  slab_size = ws->info.pte_fragment_size;  in amdgpu_bo_slab_alloc()
  702  assert(slab_size != 0);  in amdgpu_bo_slab_alloc()
  705  slab_size, slab_size,  in amdgpu_bo_slab_alloc()
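amdgpu_bo_slab_alloc() sizes a slab at twice the largest entry it must serve and, under a condition not visible in the snippet, raises it to at least ws->info.pte_fragment_size. A condensed sketch of that sizing decision; the winsys structures are stripped away and the 2 MiB fragment size in the example is hypothetical.

    #include <assert.h>
    #include <stdio.h>

    /* Condensed from the amdgpu_bo_slab_alloc() lines above: twice the
     * largest entry, but not smaller than the VM's PTE fragment size.
     * The real code applies the fragment-size floor only under an extra
     * condition that the snippet does not show. */
    static unsigned pick_slab_size(unsigned max_entry_size,
                                   unsigned pte_fragment_size) {
        unsigned slab_size = max_entry_size * 2;

        if (slab_size < pte_fragment_size)
            slab_size = pte_fragment_size;

        assert(slab_size != 0);
        return slab_size;
    }

    int main(void) {
        /* Example: 96 KiB max entry vs. a hypothetical 2 MiB PTE fragment. */
        printf("%u\n", pick_slab_size(96u * 1024, 2u * 1024 * 1024));
        return 0;
    }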
|
/external/jemalloc_new/ |
D | ChangeLog |
  214  + arenas.bin.<i>.slab_size
|