
Searched refs:slabs (Results 1 – 25 of 37) sorted by relevance

/external/mesa3d/src/gallium/auxiliary/pipebuffer/
pb_slab.c
49 struct list_head slabs; member
54 pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry) in pb_slab_reclaim() argument
64 struct pb_slab_group *group = &slabs->groups[entry->group_index]; in pb_slab_reclaim()
65 list_addtail(&slab->head, &group->slabs); in pb_slab_reclaim()
70 slabs->slab_free(slabs->priv, slab); in pb_slab_reclaim()
75 pb_slabs_reclaim_locked(struct pb_slabs *slabs) in pb_slabs_reclaim_locked() argument
77 while (!list_is_empty(&slabs->reclaim)) { in pb_slabs_reclaim_locked()
79 LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head); in pb_slabs_reclaim_locked()
81 if (!slabs->can_reclaim(slabs->priv, entry)) in pb_slabs_reclaim_locked()
84 pb_slab_reclaim(slabs, entry); in pb_slabs_reclaim_locked()
[all …]
pb_slab.h
135 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
138 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
141 pb_slabs_reclaim(struct pb_slabs *slabs);
144 pb_slabs_init(struct pb_slabs *slabs,
153 pb_slabs_deinit(struct pb_slabs *slabs);
pb_bufmgr_slab.c
124 struct list_head slabs; member
208 list_addtail(&slab->head, &mgr->slabs); in pb_slab_buffer_destroy()
354 list_addtail(&slab->head, &mgr->slabs); in pb_slab_create()
396 if (mgr->slabs.next == &mgr->slabs) { in pb_slab_manager_create_buffer()
398 if (mgr->slabs.next == &mgr->slabs) { in pb_slab_manager_create_buffer()
405 list = mgr->slabs.next; in pb_slab_manager_create_buffer()
468 list_inithead(&mgr->slabs); in pb_slab_manager_create()
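
The pb_slab.h declarations and pb_slab.c fragments above outline Mesa's slab allocator: freed entries are parked on a reclaim list and drained front-to-back while a can_reclaim() callback approves. A minimal standalone sketch of that drain pattern, with a boolean standing in for the driver's fence check and a plain singly linked list in place of Mesa's list_head (all names here are illustrative, not Mesa's actual API):

    #include <stdbool.h>
    #include <stdio.h>

    struct entry {
        struct entry *next;
        bool fence_signalled;   /* stand-in for the driver's fence check */
    };

    struct slabs {
        struct entry *reclaim;  /* pending entries, oldest first (simplified) */
    };

    static bool can_reclaim(struct entry *e) { return e->fence_signalled; }

    /* Drain the pending list in order; stop at the first entry that is
     * still busy, since later entries were submitted even later. */
    static void slabs_reclaim_locked(struct slabs *s) {
        while (s->reclaim) {
            struct entry *e = s->reclaim;
            if (!can_reclaim(e))
                break;
            s->reclaim = e->next;
            /* the real pb_slab_reclaim() re-links e into its slab's free
             * list and frees the slab once it is entirely idle */
            printf("reclaimed one entry\n");
        }
    }

    int main(void) {
        struct entry a = { NULL, true }, b = { NULL, false };
        a.next = &b;
        struct slabs s = { &a };
        slabs_reclaim_locked(&s);  /* reclaims a, stops at busy b */
        return 0;
    }
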
/external/skqp/src/compute/hs/vk/
hs_vk.c
343 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
432 uint32_t const full_bs = slabs_in / hs->config.block.slabs; in hs_bs()
433 uint32_t const frac_bs = slabs_in - full_bs * hs->config.block.slabs; in hs_bs()
654 if (state.bx_ru > hs->config.block.slabs) in hs_vk_sort()
684 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_vk_sort()
829 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_vk_create()
830 uint32_t const bc_slabs_log2_max = msb_idx_u32(pow2_rd_u32(target->config.block.slabs)); in hs_vk_create()
845 uint32_t fm_left = (target->config.block.slabs / 2) << scale; in hs_vk_create()
1146 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_vk_pad()
1147 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_vk_pad()
[all …]
hs_vk_target.h
38 uint8_t slabs; member
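
In the hs_bs() and hs_vk_pad() hits, the incoming slab count is split into whole blocks of config.block.slabs plus a fractional remainder. A standalone sketch of that quotient/remainder split, assuming an example block width of 16 slabs (not taken from any real target):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t const block_slabs_cfg = 16;  /* hypothetical config.block.slabs */
        uint32_t const slabs_in = 37;

        uint32_t const full_bs = slabs_in / block_slabs_cfg;             /* 2 full blocks */
        uint32_t const frac_bs = slabs_in - full_bs * block_slabs_cfg;   /* 5 slabs left  */

        printf("full=%u frac=%u\n", full_bs, frac_bs);
        return 0;
    }
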
/external/skqp/src/compute/hs/cl/
hs_cl.c
487 uint32_t const full_span_slabs = hs->config.block.slabs << up_scale_log2; in hs_fm()
615 uint32_t const full = (slabs_in / hs->config.block.slabs) * hs->config.block.slabs; in hs_bs()
829 if (state.bx_ru > hs->config.block.slabs) in hs_cl_sort()
854 if (((uint32_t)hs->config.block.slabs << up_scale_log2) >= state.bx_ru) in hs_cl_sort()
889 uint32_t const blocks = slabs_ru / hs->config.block.slabs; in hs_cl_pad()
890 uint32_t const block_slabs = blocks * hs->config.block.slabs; in hs_cl_pad()
892 uint32_t const slabs_ru_rem_ru = MIN_MACRO(pow2_ru_u32(slabs_ru_rem),hs->config.block.slabs); in hs_cl_pad()
900 if (slabs_ru > hs->config.block.slabs) in hs_cl_pad()
904 uint32_t const block_slabs_lo = blocks_lo * hs->config.block.slabs; in hs_cl_pad()
1040 uint32_t const bs_slabs_log2_ru = msb_idx_u32(pow2_ru_u32(target->config.block.slabs)); in hs_cl_create()
[all …]
hs_cl_target.h
36 uint8_t slabs; member
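
hs_cl_pad() rounds the remaining slab count up to a power of two and clamps it to config.block.slabs. A sketch of that round-up-and-clamp, with the standard bit-smearing idiom standing in for pow2_ru_u32() and a plain conditional in place of MIN_MACRO:

    #include <stdint.h>
    #include <stdio.h>

    /* Round n up to the next power of two (bit-smear idiom; n must be
     * nonzero and <= 2^31 for the result to be meaningful). */
    static uint32_t pow2_ru_u32(uint32_t n) {
        n--;
        n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
        n |= n >> 8;  n |= n >> 16;
        return n + 1;
    }

    int main(void) {
        uint32_t const block_slabs_cfg = 16;  /* hypothetical config.block.slabs */
        uint32_t const slabs_ru_rem = 5;
        uint32_t const ru = pow2_ru_u32(slabs_ru_rem);                   /* 8 */
        uint32_t const clamped = ru < block_slabs_cfg ? ru : block_slabs_cfg;
        printf("rounded=%u clamped=%u\n", ru, clamped);
        return 0;
    }
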
/external/mesa3d/src/gallium/frontends/nine/
nine_helpers.c
37 pool->slabs = REALLOC(pool->slabs, in nine_range_pool_more()
42 pool->free = pool->slabs[pool->num_slabs++] = r; in nine_range_pool_more()
nine_helpers.h
175 struct nine_range **slabs; member
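
The nine_helpers hits grow a heap-allocated array of slab pointers and hand out the newest slab. A generic sketch of that grow-by-realloc pattern, using plain realloc/malloc rather than Mesa's REALLOC wrapper (error handling simplified; names are illustrative):

    #include <stdlib.h>
    #include <stdio.h>

    struct pool {
        void   **slabs;      /* array of slab pointers, grown on demand */
        unsigned num_slabs;
    };

    static void *pool_more(struct pool *p, size_t slab_size) {
        void **grown = realloc(p->slabs, (p->num_slabs + 1) * sizeof(*grown));
        if (!grown)
            return NULL;
        p->slabs = grown;
        void *slab = malloc(slab_size);
        if (slab)
            p->slabs[p->num_slabs++] = slab;
        return slab;
    }

    int main(void) {
        struct pool p = { NULL, 0 };
        if (pool_more(&p, 256))
            printf("slabs=%u\n", p.num_slabs);
        for (unsigned i = 0; i < p.num_slabs; i++)
            free(p.slabs[i]);
        free(p.slabs);
        return 0;
    }
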
/external/skqp/src/compute/hs/cuda/
hs_cuda.inl
33 // 2) Blocks of slabs of keys are sorted.
35 // 3) If necessary, the blocks of slabs are merged until complete.
37 // 4) If requested, the slabs will be converted from slab ordering
690 // occupancy). This is useful when merging small numbers of slabs.
700 // number of slabs in a full-sized scaled flip-merge span
717 // the remaining slabs will be cleaned
915 // immediately sort blocks of slabs
921 // otherwise, merge sorted spans of slabs until done
933 // flip merge slabs -- return span of slabs that must be cleaned
967 // otherwise, merge twice as many slabs
[all …]
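
The hs_cuda.inl comments describe the driver loop: blocks of slabs are first sorted independently, then sorted spans are merged, each pass merging twice as many slabs, until one span covers the input. A scheduling-only sketch of that doubling loop (example sizes assumed; printf stands in for kernel launches):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t const block_slabs_cfg = 16;  /* hypothetical config.block.slabs */
        uint32_t const total_slabs     = 100;

        /* pass 0: every block of slabs is sorted independently */
        printf("sort blocks of %u slabs\n", block_slabs_cfg);

        /* merge passes: double the merged span until it spans all slabs */
        for (uint32_t span = block_slabs_cfg; span < total_slabs; span *= 2)
            printf("merge spans of %u slabs\n", span);
        return 0;
    }
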
/external/llvm-project/clang-tools-extra/clangd/test/
memory_tree.test
25 # CHECK-NEXT: "slabs": {
53 # CHECK-NEXT: "slabs": {
/external/skqp/src/compute/hs/
README.md
228 1. For each workgroup of slabs:
232 1. Until all slabs in the workgroup are merged:
235 1. Until all slabs are merged:
250 registers, sorts the slabs, merges all slabs in the workgroup, and
251 stores the slabs back to global memory.
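
The README steps above describe the block-sorting phase: each workgroup loads slabs into registers, sorts them, merges all slabs in the workgroup, and stores them back. A CPU-side sketch under toy sizes (4 "threads", 8 keys per slab), with qsort standing in for both the register sorting network and the merge step:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define THREADS   4
    #define SLAB_KEYS 8

    static int cmp_u32(const void *a, const void *b) {
        uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
        return (x > y) - (x < y);
    }

    int main(void) {
        uint32_t keys[THREADS * SLAB_KEYS];
        for (int i = 0; i < THREADS * SLAB_KEYS; i++)
            keys[i] = (uint32_t)rand();

        /* 1) each "thread" sorts the slab it holds in registers */
        for (int t = 0; t < THREADS; t++)
            qsort(keys + t * SLAB_KEYS, SLAB_KEYS, sizeof(uint32_t), cmp_u32);

        /* 2) merge slabs pairwise until the workgroup is one sorted run
         * (re-sorting the concatenated runs stands in for a real merge) */
        for (int width = SLAB_KEYS; width < THREADS * SLAB_KEYS; width *= 2)
            for (int lo = 0; lo + width < THREADS * SLAB_KEYS; lo += 2 * width)
                qsort(keys + lo, 2 * width, sizeof(uint32_t), cmp_u32);

        printf("first=%u last=%u\n", keys[0], keys[THREADS * SLAB_KEYS - 1]);
        return 0;
    }
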
/external/skqp/src/compute/hs/cuda/bench/
main.c
213 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
215 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
233 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
235 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
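
The bench hits compute the slab count with the usual round-up integer division. A short demonstration, assuming an example slab width of 256 keys:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t const slab_keys = 256;   /* assumed example slab width */
        uint32_t const count     = 1000;
        uint32_t const slabs = (count + slab_keys - 1) / slab_keys;  /* 4, not 3 */
        printf("slabs=%u\n", slabs);
        return 0;
    }
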
/external/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
633 struct pb_slabs *slabs = &bo_slabs[i]; in get_slabs() local
635 if (size <= 1 << (slabs->min_order + slabs->num_orders - 1)) in get_slabs()
636 return slabs; in get_slabs()
682 struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ? in amdgpu_bo_slab_alloc() local
687 unsigned max_entry_size = 1 << (slabs[i].min_order + slabs[i].num_orders - 1); in amdgpu_bo_slab_alloc()
1301 struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ? in amdgpu_bo_create() local
1303 struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1]; in amdgpu_bo_create()
1311 alignment <= MAX2(1 << slabs[0].min_order, util_next_power_of_two(size))) { in amdgpu_bo_create()
1318 struct pb_slabs *slabs = get_slabs(ws, size, flags); in amdgpu_bo_create() local
1319 entry = pb_slab_alloc(slabs, size, heap); in amdgpu_bo_create()
[all …]
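
get_slabs() walks the slab allocators and returns the first whose largest bucket, 1 << (min_order + num_orders - 1), fits the requested size; amdgpu_bo_create() falls back to a normal buffer otherwise. A standalone sketch with assumed tier orders (the real NUM_SLAB_ALLOCATORS and order values live in amdgpu_bo.c):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_ALLOCATORS 3

    struct slabs_cfg { unsigned min_order, num_orders; };

    static int pick_allocator(const struct slabs_cfg *cfg, uint64_t size) {
        for (int i = 0; i < NUM_ALLOCATORS; i++)
            if (size <= (uint64_t)1 << (cfg[i].min_order + cfg[i].num_orders - 1))
                return i;  /* first allocator whose max bucket fits */
        return -1;         /* too big for slabs; use a plain BO instead */
    }

    int main(void) {
        /* hypothetical tiers with max buckets of 2^11, 2^15, 2^19 bytes */
        struct slabs_cfg cfg[NUM_ALLOCATORS] = { {8, 4}, {12, 4}, {16, 4} };
        printf("4KiB -> allocator %d\n", pick_allocator(cfg, 4096));  /* 1 */
        return 0;
    }
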
/external/skqp/src/compute/hs/cl/bench/
main.c
185 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
187 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
205 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
207 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()
/external/skqp/src/compute/hs/cl/intel/gen8/u64/
hs_intel_gen8_u64.c
49 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/cl/intel/gen8/u32/
hs_intel_gen8_u32.c
49 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/amd/gcn/u64/
hs_amd_gcn_u64.c
35 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/amd/gcn/u32/
hs_amd_gcn_u32.c
35 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/intel/gen8/u64/
hs_intel_gen8_u64.c
35 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/intel/gen8/u32/
hs_intel_gen8_u32.c
35 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/nvidia/sm_35/u64/
hs_nvidia_sm35_u64.c
35 .slabs = HS_BS_SLABS
/external/skqp/src/compute/hs/vk/nvidia/sm_35/u32/
hs_nvidia_sm35_u32.c
35 .slabs = HS_BS_SLABS
/external/llvm-project/clang-tools-extra/clangd/test/index-serialization/
version-is-correct.test
12 Also if you've introduced new slabs/chunks to serialized index, make sure
/external/skqp/src/compute/hs/vk/bench/
main.c
233 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u32() local
235 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u32()
253 uint32_t const slabs = (count + slab_keys - 1) / slab_keys; in hs_debug_u64() local
255 for (uint32_t ss=0; ss<slabs; ss++) { in hs_debug_u64()