
Searched refs:slab (Results 1 – 25 of 88) sorted by relevance


/third_party/mesa3d/src/gallium/drivers/nouveau/
nouveau_mm.c
55 mm_slab_alloc(struct mm_slab *slab) in mm_slab_alloc() argument
59 if (slab->free == 0) in mm_slab_alloc()
62 for (i = 0; i < (slab->count + 31) / 32; ++i) { in mm_slab_alloc()
63 b = ffs(slab->bits[i]) - 1; in mm_slab_alloc()
66 assert(n < slab->count); in mm_slab_alloc()
67 slab->free--; in mm_slab_alloc()
68 slab->bits[i] &= ~(1 << b); in mm_slab_alloc()
76 mm_slab_free(struct mm_slab *slab, int i) in mm_slab_free() argument
78 assert(i < slab->count); in mm_slab_free()
79 slab->bits[i / 32] |= 1 << (i % 32); in mm_slab_free()
[all …]
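
A note on the pattern: the nouveau fragments above implement a bitmap slab, where one bit per entry marks "free", ffs() locates the lowest free slot, allocation clears its bit, and freeing sets it again. A minimal standalone sketch of that idea (illustrative names, not the actual mesa code):

    #include <assert.h>
    #include <stdint.h>
    #include <strings.h> /* ffs() */

    #define SLAB_COUNT 64

    struct bitmap_slab {
       uint32_t bits[(SLAB_COUNT + 31) / 32]; /* 1 = free, 0 = in use */
       int count;                             /* total entries */
       int free;                              /* entries still free */
    };

    static int slab_alloc(struct bitmap_slab *s)
    {
       if (s->free == 0)
          return -1; /* slab exhausted */
       for (int i = 0; i < (s->count + 31) / 32; ++i) {
          int b = ffs(s->bits[i]) - 1; /* lowest set bit, or -1 if none */
          if (b < 0)
             continue;
          int n = i * 32 + b;
          assert(n < s->count);
          s->bits[i] &= ~(1u << b); /* claim the entry */
          s->free--;
          return n;
       }
       return -1;
    }

    static void slab_free(struct bitmap_slab *s, int n)
    {
       assert(n < s->count);
       s->bits[n / 32] |= 1u << (n % 32); /* hand the entry back */
       s->free++;
    }
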
/third_party/skia/third_party/externals/dawn/src/common/
SlabAllocator.cpp
45 Slab* slab = this->next; in ~SentinelSlab() local
46 while (slab != nullptr) { in ~SentinelSlab()
47 Slab* next = slab->next; in ~SentinelSlab()
48 ASSERT(slab->blocksInUse == 0); in ~SentinelSlab()
50 delete[] slab->allocation; in ~SentinelSlab()
51 slab = next; in ~SentinelSlab()
107 bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const { in IsNodeInSlab() argument
108 char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset; in IsNodeInSlab()
114 void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const { in PushFront() argument
115 ASSERT(IsNodeInSlab(slab, node)); in PushFront()
[all …]
SlabAllocator.h
114 bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
118 void PushFront(Slab* slab, IndexLinkNode* node) const;
119 IndexLinkNode* PopFront(Slab* slab) const;
155 void Prepend(Slab* slab);
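
Dawn keeps slabs chained behind a sentinel; its destructor (the ~SentinelSlab() fragments above) walks the chain, asserts each slab is idle, and releases the backing storage. A simplified C sketch of that teardown, with the slab header and its storage freed separately rather than via Dawn's single placement allocation:

    #include <assert.h>
    #include <stdlib.h>

    struct slab {
       struct slab *next;
       int blocks_in_use;
       char *allocation; /* backing memory handed out in blocks */
    };

    static void sentinel_destroy(struct slab *head)
    {
       struct slab *s = head;
       while (s != NULL) {
          struct slab *next = s->next;   /* save the link before freeing */
          assert(s->blocks_in_use == 0); /* leak check: slab must be idle */
          free(s->allocation);
          free(s);
          s = next;
       }
    }
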
/third_party/mesa3d/src/gallium/auxiliary/pipebuffer/
pb_bufmgr_slab.c
62 struct pb_slab *slab; member
193 struct pb_slab *slab = buf->slab; in pb_slab_buffer_destroy() local
194 struct pb_slab_manager *mgr = slab->mgr; in pb_slab_buffer_destroy()
204 list_addtail(list, &slab->freeBuffers); in pb_slab_buffer_destroy()
205 slab->numFree++; in pb_slab_buffer_destroy()
207 if (slab->head.next == &slab->head) in pb_slab_buffer_destroy()
208 list_addtail(&slab->head, &mgr->slabs); in pb_slab_buffer_destroy()
211 if (slab->numFree == slab->numBuffers) { in pb_slab_buffer_destroy()
212 list = &slab->head; in pb_slab_buffer_destroy()
214 pb_unmap(slab->bo); in pb_slab_buffer_destroy()
[all …]
pb_slab.c
56 struct pb_slab *slab = entry->slab; in pb_slab_reclaim() local
59 list_add(&entry->head, &slab->free); in pb_slab_reclaim()
60 slab->num_free++; in pb_slab_reclaim()
63 if (!list_is_linked(&slab->head)) { in pb_slab_reclaim()
65 list_addtail(&slab->head, &group->slabs); in pb_slab_reclaim()
68 if (slab->num_free >= slab->num_entries) { in pb_slab_reclaim()
69 list_del(&slab->head); in pb_slab_reclaim()
70 slabs->slab_free(slabs->priv, slab); in pb_slab_reclaim()
124 struct pb_slab *slab; in pb_slab_alloc_reclaimed() local
159 slab = list_entry(group->slabs.next, struct pb_slab, head); in pb_slab_alloc_reclaimed()
[all …]
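
Both pipebuffer variants above share one reclaim policy: a returned entry goes back on its slab's free list, a slab that regains a free entry is re-linked so allocation can see it again, and a slab whose entries are all free is destroyed outright. A compressed sketch of that policy (counters only; mesa itself uses its util list macros):

    struct sk_slab {
       int num_free;
       int num_entries;
       int linked; /* on the group's usable-slab list? */
    };

    static void sk_reclaim(struct sk_slab *slab,
                           void (*destroy)(struct sk_slab *))
    {
       slab->num_free++;

       /* A slab with at least one free entry must be reachable. */
       if (!slab->linked)
          slab->linked = 1;

       /* Entirely idle: unlink and give the memory back. */
       if (slab->num_free >= slab->num_entries) {
          slab->linked = 0;
          destroy(slab);
       }
    }
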
/third_party/mesa3d/src/gallium/winsys/radeon/drm/
radeon_drm_bo.c
80 for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) { in radeon_bo_is_busy()
81 if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) { in radeon_bo_is_busy()
85 radeon_ws_bo_reference(&bo->u.slab.fences[num_idle], NULL); in radeon_bo_is_busy()
87 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle], in radeon_bo_is_busy()
88 (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0])); in radeon_bo_is_busy()
89 bo->u.slab.num_fences -= num_idle; in radeon_bo_is_busy()
110 while (bo->u.slab.num_fences) { in radeon_bo_wait_idle()
112 radeon_ws_bo_reference(&fence, bo->u.slab.fences[0]); in radeon_bo_wait_idle()
119 if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) { in radeon_bo_wait_idle()
120 radeon_ws_bo_reference(&bo->u.slab.fences[0], NULL); in radeon_bo_wait_idle()
[all …]
radeon_drm_cs.c
335 real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real); in radeon_lookup_or_add_slab_buffer()
359 item->u.slab.real_idx = real_idx; in radeon_lookup_or_add_slab_buffer()
395 index = cs->csc->slab_buffers[index].u.slab.real_idx; in radeon_drm_cs_add_buffer()
551 for (unsigned src = 0; src < bo->u.slab.num_fences; ++src) { in radeon_bo_slab_fence()
552 if (bo->u.slab.fences[src]->num_cs_references) { in radeon_bo_slab_fence()
553 bo->u.slab.fences[dst] = bo->u.slab.fences[src]; in radeon_bo_slab_fence()
556 radeon_ws_bo_reference(&bo->u.slab.fences[src], NULL); in radeon_bo_slab_fence()
559 bo->u.slab.num_fences = dst; in radeon_bo_slab_fence()
562 if (bo->u.slab.num_fences >= bo->u.slab.max_fences) { in radeon_bo_slab_fence()
563 unsigned new_max_fences = bo->u.slab.max_fences + 1; in radeon_bo_slab_fence()
[all …]
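
radeon_bo_slab_fence() above compacts the per-bo fence array in place: fences still referenced by a command stream are kept and shifted down, idle ones are dereferenced, and the array grows only once it is full. The compaction step in isolation (types are illustrative; the real code drops references through radeon_ws_bo_reference):

    struct sk_fence { int num_cs_references; };

    static unsigned compact_fences(struct sk_fence **fences, unsigned num)
    {
       unsigned dst = 0;
       for (unsigned src = 0; src < num; ++src) {
          if (fences[src]->num_cs_references)
             fences[dst++] = fences[src]; /* still busy: keep, shift down */
          else
             fences[src] = NULL;          /* idle: drop the reference */
       }
       return dst; /* the new num_fences */
    }
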
/third_party/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
373 real = bo->u.slab.real; in amdgpu_bo_map()
413 real = bo->bo ? bo : bo->u.slab.real; in amdgpu_bo_unmap()
634 struct amdgpu_winsys_bo *bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry); in amdgpu_bo_can_reclaim_slab()
655 assert(bo->base.size <= bo->u.slab.entry.entry_size); in get_slab_wasted_size()
658 bo->base.size > bo->u.slab.entry.entry_size / 2); in get_slab_wasted_size()
659 return bo->u.slab.entry.entry_size - bo->base.size; in get_slab_wasted_size()
677 pb_slab_free(slabs, &bo->u.slab.entry); in amdgpu_bo_slab_destroy()
710 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab); in amdgpu_bo_slab_alloc() local
716 if (!slab) in amdgpu_bo_slab_alloc()
753 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws, in amdgpu_bo_slab_alloc()
[all …]
amdgpu_bo.h
81 } slab; member
137 void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *slab);
146 struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab) in amdgpu_slab() argument
148 return (struct amdgpu_slab *)slab; in amdgpu_slab()
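
amdgpu here, and zink and iris below, all recover the owning buffer object from an embedded pb_slab_entry with container_of, which subtracts the member's offset from the member's address. A minimal sketch of that mechanism:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
       ((type *)((char *)(ptr) - offsetof(type, member)))

    struct entry { unsigned entry_size; };

    struct bo {
       unsigned size;
       struct entry slab_entry; /* embedded in the bo, not pointed to */
    };

    static struct bo *bo_from_entry(struct entry *e)
    {
       return container_of(e, struct bo, slab_entry);
    }
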
/third_party/mesa3d/src/gallium/drivers/zink/
zink_bo.c
166 struct zink_bo *bo = container_of(entry, struct zink_bo, u.slab.entry); in bo_can_reclaim_slab()
174 struct zink_slab *slab = zink_slab(pslab); in bo_slab_free() local
175 ASSERTED unsigned slab_size = slab->buffer->base.size; in bo_slab_free()
177 assert(slab->base.num_entries * slab->entry_size <= slab_size); in bo_slab_free()
178 FREE(slab->entries); in bo_slab_free()
179 zink_bo_unref(screen, slab->buffer); in bo_slab_free()
180 FREE(slab); in bo_slab_free()
193 pb_slab_free(get_slabs(screen, bo->base.size, 0), &bo->u.slab.entry); in bo_slab_destroy()
615 bo = container_of(entry, struct zink_bo, u.slab.entry); in zink_bo_create()
673 real = bo->u.slab.real; in zink_bo_map()
[all …]
/third_party/libuv/test/
test-tcp-close-after-read-timeout.c
61 static char slab[8]; in on_client_alloc() local
62 buf->base = slab; in on_client_alloc()
63 buf->len = sizeof(slab); in on_client_alloc()
86 static char slab[8]; in on_connection_alloc() local
87 buf->base = slab; in on_connection_alloc()
88 buf->len = sizeof(slab); in on_connection_alloc()
test-udp-try-send.c
43 static char slab[65536]; in alloc_cb() local
45 ASSERT(suggested_size <= sizeof(slab)); in alloc_cb()
46 buf->base = slab; in alloc_cb()
47 buf->len = sizeof(slab); in alloc_cb()
test-udp-send-immediate.c
43 static char slab[65536]; in alloc_cb() local
45 ASSERT(suggested_size <= sizeof(slab)); in alloc_cb()
46 buf->base = slab; in alloc_cb()
47 buf->len = sizeof(slab); in alloc_cb()
test-udp-multicast-join.c
48 static char slab[65536]; in alloc_cb() local
50 ASSERT(suggested_size <= sizeof(slab)); in alloc_cb()
51 buf->base = slab; in alloc_cb()
52 buf->len = sizeof(slab); in alloc_cb()
test-udp-send-and-recv.c
47 static char slab[65536]; in alloc_cb() local
49 ASSERT(suggested_size <= sizeof(slab)); in alloc_cb()
50 buf->base = slab; in alloc_cb()
51 buf->len = sizeof(slab); in alloc_cb()
test-udp-multicast-join6.c
60 static char slab[65536]; in alloc_cb() local
62 ASSERT(suggested_size <= sizeof(slab)); in alloc_cb()
63 buf->base = slab; in alloc_cb()
64 buf->len = sizeof(slab); in alloc_cb()
test-close-fd.c
31 static char slab[1]; in alloc_cb() local
32 buf->base = slab; in alloc_cb()
33 buf->len = sizeof(slab); in alloc_cb()
test-udp-send-unreachable.c
47 static char slab[65536]; in alloc_cb() local
49 ASSERT_LE(suggested_size, sizeof(slab)); in alloc_cb()
50 buf->base = slab; in alloc_cb()
51 buf->len = sizeof(slab); in alloc_cb()
test-not-readable-nor-writable-on-read-error.c
44 static char slab[64]; in alloc_cb() local
45 buf->base = slab; in alloc_cb()
46 buf->len = sizeof(slab); in alloc_cb()
test-watcher-cross-stop.c
37 static char slab[1]; variable
43 buf->base = slab; in alloc_cb()
44 buf->len = sizeof(slab); in alloc_cb()
test-readable-on-eof.c
44 static char slab[64]; in alloc_cb() local
45 buf->base = slab; in alloc_cb()
46 buf->len = sizeof(slab); in alloc_cb()
test-udp-connect6.c
46 static char slab[65536]; in alloc_cb() local
48 ASSERT_LE(suggested_size, sizeof(slab)); in alloc_cb()
49 buf->base = slab; in alloc_cb()
50 buf->len = sizeof(slab); in alloc_cb()
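
Every libuv test above uses the same idiom: the alloc callback hands back one static buffer (the "slab"), so reads reuse a single allocation instead of hitting the heap per callback. This is safe in these tests because each consumes the data before the next read on the same loop. The idiom in full:

    #include <assert.h>
    #include <uv.h>

    static void alloc_cb(uv_handle_t *handle,
                         size_t suggested_size,
                         uv_buf_t *buf)
    {
       static char slab[65536]; /* shared across all reads on this loop */
       (void) handle;
       assert(suggested_size <= sizeof(slab));
       buf->base = slab;
       buf->len = sizeof(slab);
    }
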
/third_party/mesa3d/src/amd/vulkan/
radv_pipeline_cache.c
43 struct radv_pipeline_slab *slab; member
109 if (cache->hash_table[i]->slab) in radv_pipeline_cache_finish()
110 radv_pipeline_slab_destroy(cache->device, cache->hash_table[i]->slab); in radv_pipeline_cache_finish()
412 entry->slab = pipeline->slab; in radv_create_shaders_from_pipeline_cache()
414 pipeline->slab = entry->slab; in radv_create_shaders_from_pipeline_cache()
415 pipeline->slab_bo = pipeline->slab->alloc->arena->bo; in radv_create_shaders_from_pipeline_cache()
436 p_atomic_inc(&entry->slab->ref_count); in radv_create_shaders_from_pipeline_cache()
467 radv_pipeline_slab_destroy(cache->device, pipeline->slab); in radv_pipeline_cache_insert_shaders()
469 pipeline->slab = entry->slab; in radv_pipeline_cache_insert_shaders()
470 p_atomic_inc(&pipeline->slab->ref_count); in radv_pipeline_cache_insert_shaders()
[all …]
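
The radv fragments show a pipeline slab shared between cache entries and pipelines through an atomic reference count (the p_atomic_inc calls on ref_count). A hedged sketch of that lifetime scheme using C11 atomics in place of mesa's p_atomic_* wrappers:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct pipeline_slab {
       atomic_int ref_count; /* starts at 1 for the creator */
       /* ... uploaded shader code lives here ... */
    };

    static void slab_ref(struct pipeline_slab *s)
    {
       atomic_fetch_add(&s->ref_count, 1);
    }

    static void slab_unref(struct pipeline_slab *s)
    {
       if (atomic_fetch_sub(&s->ref_count, 1) == 1)
          free(s); /* last reference gone: destroy the slab */
    }
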
/third_party/mesa3d/src/gallium/drivers/iris/
iris_bufmgr.c
612 struct iris_bo *bo = container_of(entry, struct iris_bo, slab.entry); in iris_can_reclaim_slab()
621 struct iris_slab *slab = (void *) pslab; in iris_slab_free() local
624 assert(!slab->bo->aux_map_address); in iris_slab_free()
632 struct iris_bo *bo = &slab->entries[i]; in iris_slab_free()
648 iris_bo_unreference(slab->bo); in iris_slab_free()
650 free(slab->entries); in iris_slab_free()
651 free(slab); in iris_slab_free()
661 struct iris_slab *slab = calloc(1, sizeof(struct iris_slab)); in iris_slab_alloc() local
668 if (!slab) in iris_slab_alloc()
715 slab->bo = in iris_slab_alloc()
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/docs/
ALLOCATION.rst
66 this cheap, the Cfg includes a slab allocator from which these objects are
70 providing the container with an allocator that uses the Cfg-local slab
72 store a pointer to the slab allocator in thread-local storage (TLS). This is
91 This requires maintaining the proper slab allocator pointer in TLS.
94 slab allocator into its own TLS. This is used as the Cfg is built within the
99 When the translation thread grabs a new Cfg pointer, it installs the Cfg's slab
101 assembly buffer, it must take care not to use the Cfg's slab allocator. If
102 there is a slab allocator for the assembler buffer, a pointer to it can also be
106 the Cfg's slab allocator, and clears the allocator pointer from its TLS.
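
ALLOCATION.rst's scheme, in short: each thread installs the current Cfg's slab allocator pointer in TLS while it works on that Cfg and clears it afterwards, so containers can reach the allocator without it being threaded through every call. Subzero itself is C++; a C11 rendering of the pattern, with illustrative names, looks like:

    struct slab_allocator; /* opaque allocator handle */

    static _Thread_local struct slab_allocator *tls_allocator;

    static void install_allocator(struct slab_allocator *a)
    {
       tls_allocator = a; /* entering work on this Cfg */
    }

    static void clear_allocator(void)
    {
       tls_allocator = 0; /* done: fail fast if used out of scope */
    }

    static struct slab_allocator *current_allocator(void)
    {
       return tls_allocator; /* what Cfg-local containers allocate from */
    }
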
