
Searched for refs:slabs (results 1–25 of 43), sorted by relevance

/external/mesa3d/src/gallium/auxiliary/pipebuffer/
pb_slab.c:49 struct list_head slabs; member
54 pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry) in pb_slab_reclaim() argument
64 struct pb_slab_group *group = &slabs->groups[entry->group_index]; in pb_slab_reclaim()
65 LIST_ADDTAIL(&slab->head, &group->slabs); in pb_slab_reclaim()
70 slabs->slab_free(slabs->priv, slab); in pb_slab_reclaim()
75 pb_slabs_reclaim_locked(struct pb_slabs *slabs) in pb_slabs_reclaim_locked() argument
77 while (!LIST_IS_EMPTY(&slabs->reclaim)) { in pb_slabs_reclaim_locked()
79 LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head); in pb_slabs_reclaim_locked()
81 if (!slabs->can_reclaim(slabs->priv, entry)) in pb_slabs_reclaim_locked()
84 pb_slab_reclaim(slabs, entry); in pb_slabs_reclaim_locked()
[all …]
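
The pb_slab.c hits above show the reclaim path: freed entries wait on the slabs->reclaim FIFO and go back to their slab only once the driver's can_reclaim callback reports them idle, and the loop stops at the first busy entry, since entries behind it were freed later. A minimal self-contained sketch of that pattern, using hypothetical mini types rather than Mesa's actual structs:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for Mesa's pb_slab structs. */
struct entry {
   struct entry *next;     /* FIFO link */
   bool          gpu_idle; /* stands in for can_reclaim()'s verdict */
};

struct reclaim_fifo {
   struct entry *head, *tail;
};

/* Mirrors pb_slabs_reclaim_locked(): pop idle entries from the front;
 * the first busy entry ends the walk, because everything behind it
 * was freed more recently and is at least as likely to be in flight. */
static void reclaim_idle(struct reclaim_fifo *fifo)
{
   while (fifo->head) {
      struct entry *e = fifo->head;
      if (!e->gpu_idle)        /* mirrors !slabs->can_reclaim(...) */
         break;
      fifo->head = e->next;    /* dequeue ... */
      if (!fifo->head)
         fifo->tail = NULL;
      /* ... and hand e back to its slab, re-listing the slab in its
       * group as the LIST_ADDTAIL(&slab->head, &group->slabs) hit
       * above does. */
   }
}
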
pb_slab.h:135 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
138 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
141 pb_slabs_reclaim(struct pb_slabs *slabs);
144 pb_slabs_init(struct pb_slabs *slabs,
153 pb_slabs_deinit(struct pb_slabs *slabs);
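
Taken together, the pb_slab.h declarations above outline the allocator's lifecycle. A hedged usage sketch follows: the return types are not visible in these hits, so pb_slab_alloc returning a struct pb_slab_entry * is an assumption, and pb_slabs_init's parameter list stays elided because it is truncated above.

#include "pb_slab.h"   /* building inside the Mesa tree */

void slab_lifecycle_sketch(void)
{
   struct pb_slabs slabs;

   /* pb_slabs_init(&slabs, ...);  -- parameter list truncated in the hits above */

   /* Assumption: returns NULL when no suballocation can be made. */
   struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 4096 /* size */, 0 /* heap */);
   if (entry) {
      /* ... use the buffer range backing this entry ... */
      pb_slab_free(&slabs, entry);   /* queues the entry for later reclaim */
   }

   pb_slabs_reclaim(&slabs);         /* returns idle entries to their slabs */
   pb_slabs_deinit(&slabs);
}
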
pb_bufmgr_slab.c:128 struct list_head slabs; member
212 LIST_ADDTAIL(&slab->head, &mgr->slabs); in pb_slab_buffer_destroy()
359 LIST_ADDTAIL(&slab->head, &mgr->slabs); in pb_slab_create()
401 if (mgr->slabs.next == &mgr->slabs) { in pb_slab_manager_create_buffer()
403 if (mgr->slabs.next == &mgr->slabs) { in pb_slab_manager_create_buffer()
410 list = mgr->slabs.next; in pb_slab_manager_create_buffer()
473 LIST_INITHEAD(&mgr->slabs); in pb_slab_manager_create()
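
pb_bufmgr_slab.c leans on the classic circular sentinel list: LIST_INITHEAD points a head at itself, so mgr->slabs.next == &mgr->slabs is the emptiness test (apparently re-checked on line 403 because the slab creation attempted in between can fail). A minimal sketch of that idiom:

struct list_head { struct list_head *next, *prev; };

/* An empty list is a sentinel pointing at itself. */
static void list_inithead(struct list_head *head)
{
   head->next = head->prev = head;
}

static int list_is_empty(const struct list_head *head)
{
   return head->next == head;   /* the test used on lines 401/403 above */
}

/* Insert just before the sentinel, i.e. at the tail. */
static void list_addtail(struct list_head *item, struct list_head *head)
{
   item->prev = head->prev;
   item->next = head;
   head->prev->next = item;
   head->prev = item;
}
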
/external/skqp/src/compute/hs/vk/
hs_vk.c:343 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
432 uint32_t const full_bs = slabs_in / hs->config.block.slabs; in hs_bs()
433 uint32_t const frac_bs = slabs_in - full_bs * hs->config.block.slabs; in hs_bs()
654 if (state.bx_ru > hs->config.block.slabs) in hs_vk_sort()
684 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_vk_sort()
829 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_vk_create()
830 uint32_t const bc_slabs_log2_max = msb_idx_u32(pow2_rd_u32(target->config.block.slabs)); in hs_vk_create()
845 uint32_t fm_left = (target->config.block.slabs / 2) << scale; in hs_vk_create()
1146 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_vk_pad()
1147 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_vk_pad()
[all …]
hs_vk_target.h:38 uint8_t slabs; member
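
The hs_vk_pad() hits (lines 1146–1147) show the decomposition step: a rounded-up slab count is split into whole blocks of config.block.slabs slabs, and anything past block_slabs is padded separately. A worked example, using a hypothetical block size of 16 slabs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   uint32_t const cfg_block_slabs = 16;   /* hypothetical hs->config.block.slabs */
   uint32_t const slabs_ru        = 37;   /* slab count, already rounded up */

   uint32_t const blocks      = slabs_ru / cfg_block_slabs;  /* 37/16 = 2 full blocks */
   uint32_t const block_slabs = blocks * cfg_block_slabs;    /* 2*16  = 32 slabs      */
   uint32_t const remainder   = slabs_ru - block_slabs;      /* 37-32 = 5 slabs left  */

   printf("%u blocks, %u block slabs, %u remaining\n",
          blocks, block_slabs, remainder);
   return 0;
}
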
/external/skia/src/compute/hs/vk/
hs_vk.c:343 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
432 uint32_t const full_bs = slabs_in / hs->config.block.slabs; in hs_bs()
433 uint32_t const frac_bs = slabs_in - full_bs * hs->config.block.slabs; in hs_bs()
654 if (state.bx_ru > hs->config.block.slabs) in hs_vk_sort()
684 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_vk_sort()
829 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_vk_create()
830 uint32_t const bc_slabs_log2_max = msb_idx_u32(pow2_rd_u32(target->config.block.slabs)); in hs_vk_create()
845 uint32_t fm_left = (target->config.block.slabs / 2) << scale; in hs_vk_create()
1146 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_vk_pad()
1147 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_vk_pad()
[all …]
hs_vk_target.h:38 uint8_t slabs; member
/external/skia/src/compute/hs/cl/
hs_cl.c:487 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
615 uint32_t const full = (slabs_in / hs->config.block.slabs) * hs->config.block.slabs; in hs_bs()
829 if (state.bx_ru > hs->config.block.slabs) in hs_cl_sort()
854 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_cl_sort()
889 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_cl_pad()
890 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_cl_pad()
892 uint32_t const slabs_ru_rem_ru = MIN_MACRO(pow2_ru_u32(slabs_ru_rem),hs->config.block.slabs); in hs_cl_pad()
900 if (slabs_ru > hs->config.block.slabs) in hs_cl_pad()
904 uint32_t const block_slabs_lo = blocks_lo * hs->config.block.slabs; in hs_cl_pad()
1040 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_cl_create()
[all …]
hs_cl_target.h:36 uint8_t slabs; member
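
hs_cl_create() (like hs_vk_create() above) derives kernel-selection shifts from pow2_ru_u32/pow2_rd_u32, which round a value up or down to a power of two, and msb_idx_u32, the index of the most-significant set bit. A plausible portable implementation consistent with that usage; the actual HotSort helpers live in its common code and may be written differently:

#include <stdint.h>

/* Index of the most-significant set bit; undefined for v == 0. */
static uint32_t msb_idx_u32(uint32_t v)
{
   uint32_t idx = 0;
   while (v >>= 1)
      idx++;
   return idx;
}

/* Round down to the nearest power of two; undefined for v == 0. */
static uint32_t pow2_rd_u32(uint32_t v)
{
   return 1u << msb_idx_u32(v);
}

/* Round up to the nearest power of two: smear the high bit right,
 * then add one. */
static uint32_t pow2_ru_u32(uint32_t v)
{
   v--;
   v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
   v |= v >> 8;  v |= v >> 16;
   return v + 1;
}
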
/external/skqp/src/compute/hs/cl/
hs_cl.c:487 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
615 uint32_t const full = (slabs_in / hs->config.block.slabs) * hs->config.block.slabs; in hs_bs()
829 if (state.bx_ru > hs->config.block.slabs) in hs_cl_sort()
854 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_cl_sort()
889 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_cl_pad()
890 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_cl_pad()
892 uint32_t const slabs_ru_rem_ru = MIN_MACRO(pow2_ru_u32(slabs_ru_rem),hs->config.block.slabs); in hs_cl_pad()
900 if (slabs_ru > hs->config.block.slabs) in hs_cl_pad()
904 uint32_t const block_slabs_lo = blocks_lo * hs->config.block.slabs; in hs_cl_pad()
1040 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_cl_create()
[all …]
hs_cl_target.h:36 uint8_t slabs; member
/external/mesa3d/src/gallium/state_trackers/nine/
nine_helpers.c:37 pool->slabs = REALLOC(pool->slabs, in nine_range_pool_more()
42 pool->free = pool->slabs[pool->num_slabs++] = r; in nine_range_pool_more()
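
nine_helpers.c grows a flat array of slab pointers with REALLOC and appends in a single expression. The same grow-by-one pattern in plain C (hypothetical type and function names; Mesa's REALLOC wrapper also takes the old size):

#include <stdlib.h>

struct range;                  /* opaque element, standing in for nine's struct */

struct pool {
   struct range **slabs;       /* growable array of slab pointers */
   unsigned       num_slabs;
};

/* Append one slab pointer, growing the array by exactly one slot. */
static int pool_append(struct pool *pool, struct range *r)
{
   struct range **grown =
      realloc(pool->slabs, (pool->num_slabs + 1) * sizeof(*grown));
   if (!grown)
      return -1;               /* the old array is still valid on failure */
   pool->slabs = grown;
   pool->slabs[pool->num_slabs++] = r;
   return 0;
}
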
/external/skia/src/compute/hs/cuda/
hs_cuda.inl:33 // 2) Blocks of slabs of keys are sorted.
35 // 3) If necessary, the blocks of slabs are merged until complete.
37 // 4) If requested, the slabs will be converted from slab ordering
690 // occupancy). This is useful when merging small numbers of slabs.
700 // number of slabs in a full-sized scaled flip-merge span
717 // the remaining slabs will be cleaned
915 // immediately sort blocks of slabs
921 // otherwise, merge sorted spans of slabs until done
933 // flip merge slabs -- return span of slabs that must be cleaned
967 // otherwise, merge twice as many slabs
[all …]
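
The hs_cuda.inl comments above give the whole pipeline: sort blocks of slabs, then flip-merge sorted spans, cleaning the span the merge reports dirty, and merge twice as many slabs each pass until one span covers everything. A hedged control-flow sketch with stub helpers (hypothetical names, not HotSort's actual driver code; in HotSort these steps launch CUDA kernels):

#include <stdint.h>

/* Hypothetical stubs for the kernel launches. */
static void sort_blocks_of_slabs(uint32_t slab_count) { (void)slab_count; }
static uint32_t flip_merge(uint32_t span, uint32_t slab_count)
{ (void)span; return slab_count; }   /* pretend the whole range needs cleaning */
static void clean_slabs(uint32_t count) { (void)count; }

/* Control-flow sketch of the pipeline described in the comments above. */
void hs_sort_sketch(uint32_t slab_count, uint32_t block_slabs)
{
   sort_blocks_of_slabs(slab_count);      /* 2) sort blocks of slabs          */

   for (uint32_t span = block_slabs;      /* 3) merge spans until done        */
        span < slab_count;
        span *= 2)                        /*    merge twice as many next pass */
   {
      uint32_t dirty = flip_merge(span, slab_count);
      clean_slabs(dirty);                 /*    clean the span reported dirty */
   }
}
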
/external/skqp/src/compute/hs/cuda/
hs_cuda.inl:33 // 2) Blocks of slabs of keys are sorted.
35 // 3) If necessary, the blocks of slabs are merged until complete.
37 // 4) If requested, the slabs will be converted from slab ordering
690 // occupancy). This is useful when merging small numbers of slabs.
700 // number of slabs in a full-sized scaled flip-merge span
717 // the remaining slabs will be cleaned
915 // immediately sort blocks of slabs
921 // otherwise, merge sorted spans of slabs until done
933 // flip merge slabs -- return span of slabs that must be cleaned
967 // otherwise, merge twice as many slabs
[all …]
/external/skqp/src/compute/hs/
README.md:228 1. For each workgroup of slabs:
232 1. Until all slabs in the workgroup are merged:
235 1. Until all slabs are merged:
250 registers, sorts the slabs, merges all slabs in the workgroup, and
251 stores the slabs back to global memory.
/external/skia/src/compute/hs/
README.md:228 1. For each workgroup of slabs:
232 1. Until all slabs in the workgroup are merged:
235 1. Until all slabs are merged:
250 registers, sorts the slabs, merges all slabs in the workgroup, and
251 stores the slabs back to global memory.
/external/skqp/src/compute/hs/cuda/bench/
main.c:213 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
215 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
233 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
235 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
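
Both debug helpers compute the slab count with the usual round-up division, slabs = (count + slab_keys - 1) / slab_keys. With a hypothetical slab_keys of 256, count = 1000 gives (1000 + 255) / 256 = 4 slabs, and an exact multiple such as count = 1024 gives (1024 + 255) / 256 = 4 as well, so a partially filled final slab is counted without over-counting exact multiples.
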
/external/skia/src/compute/hs/cuda/bench/
main.c:213 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
215 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
233 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
235 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
/external/skia/src/compute/hs/cl/bench/
main.c:185 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
187 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
205 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
207 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
/external/skqp/src/compute/hs/cl/bench/
main.c:185 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
187 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
205 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
207 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
/external/skia/src/compute/hs/cl/intel/gen8/u64/
hs_intel_gen8_u64.c:49 .slabs = HS_BS_SLABS
/external/skia/src/compute/hs/cl/intel/gen8/u32/
hs_intel_gen8_u32.c:49 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/cl/intel/gen8/u32/
hs_intel_gen8_u32.c:49 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/cl/intel/gen8/u64/
hs_intel_gen8_u64.c:49 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/nvidia/sm_35/u32/
hs_nvidia_sm35_u32.c:35 .slabs = HS_BS_SLABS
