/external/mesa3d/src/gallium/auxiliary/pipebuffer/ |
D | pb_slab.c |
    49   struct list_head slabs;  member
    54   pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)  in pb_slab_reclaim() argument
    64   struct pb_slab_group *group = &slabs->groups[entry->group_index];  in pb_slab_reclaim()
    65   LIST_ADDTAIL(&slab->head, &group->slabs);  in pb_slab_reclaim()
    70   slabs->slab_free(slabs->priv, slab);  in pb_slab_reclaim()
    75   pb_slabs_reclaim_locked(struct pb_slabs *slabs)  in pb_slabs_reclaim_locked() argument
    77   while (!LIST_IS_EMPTY(&slabs->reclaim)) {  in pb_slabs_reclaim_locked()
    79   LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);  in pb_slabs_reclaim_locked()
    81   if (!slabs->can_reclaim(slabs->priv, entry))  in pb_slabs_reclaim_locked()
    84   pb_slab_reclaim(slabs, entry);  in pb_slabs_reclaim_locked()
    [all …]
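The pb_slab.c matches trace Mesa's deferred-reclaim loop: freed entries wait on a reclaim list until can_reclaim() reports they are idle, then pb_slab_reclaim() returns the slab to its group (or frees it once empty). A minimal self-contained sketch of that pattern, using hypothetical names (struct entry, reclaim_all) rather than Mesa's API:

/*
 * Deferred-reclaim sketch: pop reclaimable entries from the head of the
 * list and stop at the first entry that is still busy, mirroring the
 * shape of pb_slabs_reclaim_locked(). All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
   struct entry *next;   /* singly linked reclaim list, for brevity */
   bool idle;            /* stands in for slabs->can_reclaim()      */
};

static void reclaim_all(struct entry **head)
{
   while (*head) {
      struct entry *e = *head;
      if (!e->idle)
         break;                /* in-flight work still references it */
      *head = e->next;
      printf("reclaimed %p\n", (void *)e);
   }
}

int main(void)
{
   struct entry b = { NULL, false };   /* still busy: loop must stop here */
   struct entry a = { &b, true };      /* idle: gets reclaimed            */
   struct entry *reclaim = &a;
   reclaim_all(&reclaim);              /* reclaims a, leaves b queued     */
   return 0;
}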
|
D | pb_slab.h |
    135  pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
    138  pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
    141  pb_slabs_reclaim(struct pb_slabs *slabs);
    144  pb_slabs_init(struct pb_slabs *slabs,
    153  pb_slabs_deinit(struct pb_slabs *slabs);
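pb_slab.h exposes the allocator's public surface: pb_slab_alloc() takes a size and a heap index. How that request maps to a bucket is not shown in these matches; the sketch below assumes the common scheme of rounding sizes up to power-of-two orders and keeping one group per (heap, order) pair. The constants and the group-index formula are illustrative assumptions, not copied from the header:

/* Hedged sketch of (heap, order) bucketing for a pb_slab-style allocator. */
#include <stdio.h>

#define MIN_ORDER  6u   /* smallest bucket: 64 bytes (assumed)  */
#define MAX_ORDER  16u  /* largest bucket: 64 KiB    (assumed)  */
#define NUM_ORDERS (MAX_ORDER - MIN_ORDER + 1)

/* Round a size up to the next power-of-two order (log2 of the bucket). */
static unsigned size_to_order(unsigned size)
{
   unsigned order = MIN_ORDER;
   while ((1u << order) < size)
      order++;
   return order;
}

/* One group per (heap, order) pair; a plausible lookup for pb_slab_alloc. */
static unsigned group_index(unsigned heap, unsigned order)
{
   return heap * NUM_ORDERS + (order - MIN_ORDER);
}

int main(void)
{
   unsigned order = size_to_order(3000);          /* -> 12 (4096 bytes) */
   printf("order %u, group %u\n", order, group_index(1, order));
   return 0;
}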
|
D | pb_bufmgr_slab.c |
    128  struct list_head slabs;  member
    212  LIST_ADDTAIL(&slab->head, &mgr->slabs);  in pb_slab_buffer_destroy()
    359  LIST_ADDTAIL(&slab->head, &mgr->slabs);  in pb_slab_create()
    401  if (mgr->slabs.next == &mgr->slabs) {  in pb_slab_manager_create_buffer()
    403  if (mgr->slabs.next == &mgr->slabs) {  in pb_slab_manager_create_buffer()
    410  list = mgr->slabs.next;  in pb_slab_manager_create_buffer()
    473  LIST_INITHEAD(&mgr->slabs);  in pb_slab_manager_create()
|
/external/skqp/src/compute/hs/vk/ |
D | hs_vk.c |
    343   uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2;  in hs_fm()
    432   uint32_t const full_bs = slabs_in / hs->config.block.slabs;  in hs_bs()
    433   uint32_t const frac_bs = slabs_in - full_bs * hs->config.block.slabs;  in hs_bs()
    654   if (state.bx_ru > hs->config.block.slabs)  in hs_vk_sort()
    684   if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru)  in hs_vk_sort()
    829   uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs));  in hs_vk_create()
    830   uint32_t const bc_slabs_log2_max = msb_idx_u32(pow2_rd_u32(target->config.block.slabs));  in hs_vk_create()
    845   uint32_t fm_left = (target->config.block.slabs / 2) << scale;  in hs_vk_create()
    1146  uint32_t const blocks = slabs_ru / hs->config.block.slabs;  in hs_vk_pad()
    1147  uint32_t const block_slabs = blocks * hs->config.block.slabs;  in hs_vk_pad()
    [all …]
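hs_vk_create() leans on two bit utilities, pow2_ru_u32/pow2_rd_u32 and msb_idx_u32, whose bodies are not in the matches. Below are plausible portable definitions of these classic bit tricks, offered as assumptions rather than HotSort's actual implementations:

#include <stdint.h>
#include <stdio.h>

/* Index of the most significant set bit (x must be nonzero). */
static uint32_t msb_idx_u32(uint32_t x)
{
   uint32_t i = 0;
   while (x >>= 1)
      i++;
   return i;
}

/* Round up to the next power of two (classic bit-smearing trick). */
static uint32_t pow2_ru_u32(uint32_t x)
{
   x--;
   x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
   x |= x >> 8;  x |= x >> 16;
   return x + 1;
}

/* Round down to the previous power of two. */
static uint32_t pow2_rd_u32(uint32_t x)
{
   return 1u << msb_idx_u32(x);
}

int main(void)
{
   /* e.g. config.block.slabs == 12: ru -> 16 (log2 4), rd -> 8 (log2 3) */
   printf("%u %u\n", msb_idx_u32(pow2_ru_u32(12)), msb_idx_u32(pow2_rd_u32(12)));
   return 0;
}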
|
D | hs_vk_target.h | 38 uint8_t slabs; member
|
/external/skia/src/compute/hs/vk/ |
D | hs_vk.c |
    343   uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2;  in hs_fm()
    432   uint32_t const full_bs = slabs_in / hs->config.block.slabs;  in hs_bs()
    433   uint32_t const frac_bs = slabs_in - full_bs * hs->config.block.slabs;  in hs_bs()
    654   if (state.bx_ru > hs->config.block.slabs)  in hs_vk_sort()
    684   if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru)  in hs_vk_sort()
    829   uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs));  in hs_vk_create()
    830   uint32_t const bc_slabs_log2_max = msb_idx_u32(pow2_rd_u32(target->config.block.slabs));  in hs_vk_create()
    845   uint32_t fm_left = (target->config.block.slabs / 2) << scale;  in hs_vk_create()
    1146  uint32_t const blocks = slabs_ru / hs->config.block.slabs;  in hs_vk_pad()
    1147  uint32_t const block_slabs = blocks * hs->config.block.slabs;  in hs_vk_pad()
    [all …]
|
D | hs_vk_target.h | 38 uint8_t slabs; member
|
/external/skia/src/compute/hs/cl/ |
D | hs_cl.c |
    487   uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2;  in hs_fm()
    615   uint32_t const full = (slabs_in / hs->config.block.slabs) * hs->config.block.slabs;  in hs_bs()
    829   if (state.bx_ru > hs->config.block.slabs)  in hs_cl_sort()
    854   if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru)  in hs_cl_sort()
    889   uint32_t const blocks = slabs_ru / hs->config.block.slabs;  in hs_cl_pad()
    890   uint32_t const block_slabs = blocks * hs->config.block.slabs;  in hs_cl_pad()
    892   uint32_t const slabs_ru_rem_ru = MIN_MACRO(pow2_ru_u32(slabs_ru_rem),hs->config.block.slabs);  in hs_cl_pad()
    900   if (slabs_ru > hs->config.block.slabs)  in hs_cl_pad()
    904   uint32_t const block_slabs_lo = blocks_lo * hs->config.block.slabs;  in hs_cl_pad()
    1040  uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs));  in hs_cl_create()
    [all …]
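hs_cl_pad()'s arithmetic splits the rounded-up slab count into whole blocks plus a remainder, then rounds the remainder up to a power of two capped at the block size. A small sketch under assumed values (MIN_MACRO and pow2_ru_u32 are defined here with their conventional meanings, as assumptions):

#include <stdint.h>
#include <stdio.h>

#define MIN_MACRO(a, b) ((a) < (b) ? (a) : (b))

static uint32_t pow2_ru_u32(uint32_t x)
{
   x--;
   x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
   x |= x >> 8;  x |= x >> 16;
   return x + 1;
}

int main(void)
{
   uint32_t const block_slabs_cfg = 16;  /* assumed config.block.slabs      */
   uint32_t const slabs_ru        = 37;  /* slab count, already rounded up  */

   uint32_t const blocks          = slabs_ru / block_slabs_cfg;        /* 2  */
   uint32_t const block_slabs     = blocks * block_slabs_cfg;          /* 32 */
   uint32_t const slabs_ru_rem    = slabs_ru - block_slabs;            /* 5  */
   uint32_t const slabs_ru_rem_ru =
      MIN_MACRO(pow2_ru_u32(slabs_ru_rem), block_slabs_cfg);           /* 8  */

   printf("%u full blocks + %u padded remainder slabs\n",
          blocks, slabs_ru_rem_ru);
   return 0;
}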
|
D | hs_cl_target.h | 36 uint8_t slabs; member
|
/external/skqp/src/compute/hs/cl/ |
D | hs_cl.c |
    487   uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2;  in hs_fm()
    615   uint32_t const full = (slabs_in / hs->config.block.slabs) * hs->config.block.slabs;  in hs_bs()
    829   if (state.bx_ru > hs->config.block.slabs)  in hs_cl_sort()
    854   if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru)  in hs_cl_sort()
    889   uint32_t const blocks = slabs_ru / hs->config.block.slabs;  in hs_cl_pad()
    890   uint32_t const block_slabs = blocks * hs->config.block.slabs;  in hs_cl_pad()
    892   uint32_t const slabs_ru_rem_ru = MIN_MACRO(pow2_ru_u32(slabs_ru_rem),hs->config.block.slabs);  in hs_cl_pad()
    900   if (slabs_ru > hs->config.block.slabs)  in hs_cl_pad()
    904   uint32_t const block_slabs_lo = blocks_lo * hs->config.block.slabs;  in hs_cl_pad()
    1040  uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs));  in hs_cl_create()
    [all …]
|
D | hs_cl_target.h | 36 uint8_t slabs; member
|
/external/mesa3d/src/gallium/state_trackers/nine/ |
D | nine_helpers.c |
    37  pool->slabs = REALLOC(pool->slabs,  in nine_range_pool_more()
    42  pool->free = pool->slabs[pool->num_slabs++] = r;  in nine_range_pool_more()
|
/external/skia/src/compute/hs/cuda/ |
D | hs_cuda.inl |
    33   // 2) Blocks of slabs of keys are sorted.
    35   // 3) If necessary, the blocks of slabs are merged until complete.
    37   // 4) If requested, the slabs will be converted from slab ordering
    690  // occupancy). This is useful when merging small numbers of slabs.
    700  // number of slabs in a full-sized scaled flip-merge span
    717  // the remaining slabs will be cleaned
    915  // immediately sort blocks of slabs
    921  // otherwise, merge sorted spans of slabs until done
    933  // flip merge slabs -- return span of slabs that must be cleaned
    967  // otherwise, merge twice as many slabs
    [all …]
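Step 4 in the comments above converts keys out of "slab ordering". As a rough illustration only: assuming a slab is WIDTH lanes by HEIGHT rows with each lane holding consecutive keys (a column-major convention; the real HotSort layout may differ), the mapping from a linear index to a slab position looks like this:

#include <stdint.h>
#include <stdio.h>

#define WIDTH  4u  /* lanes per slab (assumed) */
#define HEIGHT 4u  /* rows per slab  (assumed) */

int main(void)
{
   /* Map each linear index to its (slab, lane, row) slab-order position. */
   for (uint32_t i = 0; i < 2 * WIDTH * HEIGHT; i++) {
      uint32_t const slab = i / (WIDTH * HEIGHT);
      uint32_t const rem  = i % (WIDTH * HEIGHT);
      uint32_t const lane = rem / HEIGHT;   /* column-major within a slab */
      uint32_t const row  = rem % HEIGHT;
      printf("key %2u -> slab %u lane %u row %u\n", i, slab, lane, row);
   }
   return 0;
}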
|
/external/skqp/src/compute/hs/cuda/ |
D | hs_cuda.inl |
    33   // 2) Blocks of slabs of keys are sorted.
    35   // 3) If necessary, the blocks of slabs are merged until complete.
    37   // 4) If requested, the slabs will be converted from slab ordering
    690  // occupancy). This is useful when merging small numbers of slabs.
    700  // number of slabs in a full-sized scaled flip-merge span
    717  // the remaining slabs will be cleaned
    915  // immediately sort blocks of slabs
    921  // otherwise, merge sorted spans of slabs until done
    933  // flip merge slabs -- return span of slabs that must be cleaned
    967  // otherwise, merge twice as many slabs
    [all …]
|
/external/skqp/src/compute/hs/ |
D | README.md |
    228  1. For each workgroup of slabs:
    232  1. Until all slabs in the workgroup are merged:
    235  1. Until all slabs are merged:
    250  registers, sorts the slabs, merges all slabs in the workgroup, and
    251  stores the slabs back to global memory.
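The README outline above, collapsed into a toy CPU model: sort fixed-size "slabs" of a key array independently (the per-workgroup phase), then merge sorted spans, doubling the span each pass until one remains. qsort and the sizes stand in for the GPU kernels; this is a sketch of the control flow, not HotSort itself:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLAB 4u   /* keys per slab (assumed) */
#define N    16u  /* total keys    (assumed) */

static int cmp_u32(const void *a, const void *b)
{
   uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
   return (x > y) - (x < y);
}

int main(void)
{
   uint32_t keys[N], tmp[N];
   for (uint32_t i = 0; i < N; i++)
      keys[i] = (i * 2654435761u) % 100;   /* pseudo-random fill */

   /* 1. Sort each slab independently (the per-workgroup phase). */
   for (uint32_t s = 0; s < N; s += SLAB)
      qsort(keys + s, SLAB, sizeof(uint32_t), cmp_u32);

   /* 2. Merge sorted spans, doubling the span until one remains. */
   for (uint32_t span = SLAB; span < N; span *= 2) {
      for (uint32_t s = 0; s < N; s += 2 * span) {
         uint32_t a = s, b = s + span, o = s;
         while (a < s + span && b < s + 2 * span)
            tmp[o++] = keys[a] <= keys[b] ? keys[a++] : keys[b++];
         while (a < s + span)     tmp[o++] = keys[a++];
         while (b < s + 2 * span) tmp[o++] = keys[b++];
      }
      memcpy(keys, tmp, sizeof(keys));
   }

   for (uint32_t i = 0; i < N; i++)
      printf("%u ", keys[i]);
   printf("\n");
   return 0;
}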
|
/external/skia/src/compute/hs/ |
D | README.md |
    228  1. For each workgroup of slabs:
    232  1. Until all slabs in the workgroup are merged:
    235  1. Until all slabs are merged:
    250  registers, sorts the slabs, merges all slabs in the workgroup, and
    251  stores the slabs back to global memory.
|
/external/skqp/src/compute/hs/cuda/bench/ |
D | main.c |
    213  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u32() local
    215  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u32()
    233  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u64() local
    235  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u64()
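The bench's hs_debug helpers size their loop with a ceiling division, slabs = ceil(count / slab_keys), then walk each slab. A minimal sketch with assumed values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   uint32_t const slab_keys = 32;   /* keys per slab (assumed) */
   uint32_t const count     = 100;  /* keys to dump  (assumed) */

   /* Ceiling division: enough slabs to cover a partial final slab. */
   uint32_t const slabs = (count + slab_keys - 1) / slab_keys;   /* 4 */

   for (uint32_t ss = 0; ss < slabs; ss++) {
      uint32_t const end = (ss + 1) * slab_keys;
      printf("slab %u covers keys [%u, %u)\n",
             ss, ss * slab_keys, end < count ? end : count);
   }
   return 0;
}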
|
/external/skia/src/compute/hs/cuda/bench/ |
D | main.c |
    213  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u32() local
    215  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u32()
    233  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u64() local
    235  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u64()
|
/external/skia/src/compute/hs/cl/bench/ |
D | main.c |
    185  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u32() local
    187  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u32()
    205  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u64() local
    207  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u64()
|
/external/skqp/src/compute/hs/cl/bench/ |
D | main.c |
    185  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u32() local
    187  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u32()
    205  uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  in hs_debug_u64() local
    207  for (uint32_t ss=0; ss<slabs; ss++) {  in hs_debug_u64()
|
/external/skia/src/compute/hs/cl/intel/gen8/u64/ |
D | hs_intel_gen8_u64.c | 49 .slabs = HS_BS_SLABS
|
/external/skia/src/compute/hs/cl/intel/gen8/u32/ |
D | hs_intel_gen8_u32.c | 49 .slabs = HS_BS_SLABS
|
/external/skqp/src/compute/hs/cl/intel/gen8/u32/ |
D | hs_intel_gen8_u32.c | 49 .slabs = HS_BS_SLABS
|
/external/skqp/src/compute/hs/cl/intel/gen8/u64/ |
D | hs_intel_gen8_u64.c | 49 .slabs = HS_BS_SLABS
|
/external/skqp/src/compute/hs/vk/nvidia/sm_35/u32/ |
D | hs_nvidia_sm35_u32.c | 35 .slabs = HS_BS_SLABS
|