Searched refs:slab (Results 1 – 25 of 28) sorted by relevance
53  mm_slab_alloc(struct mm_slab *slab)  in mm_slab_alloc() argument
57  if (slab->free == 0)  in mm_slab_alloc()
60  for (i = 0; i < (slab->count + 31) / 32; ++i) {  in mm_slab_alloc()
61  b = ffs(slab->bits[i]) - 1;  in mm_slab_alloc()
64  assert(n < slab->count);  in mm_slab_alloc()
65  slab->free--;  in mm_slab_alloc()
66  slab->bits[i] &= ~(1 << b);  in mm_slab_alloc()
74  mm_slab_free(struct mm_slab *slab, int i)  in mm_slab_free() argument
76  assert(i < slab->count);  in mm_slab_free()
77  slab->bits[i / 32] |= 1 << (i % 32);  in mm_slab_free()
[all …]
63  struct pb_slab *slab;  member
198 struct pb_slab *slab = buf->slab;  in pb_slab_buffer_destroy() local
199 struct pb_slab_manager *mgr = slab->mgr;  in pb_slab_buffer_destroy()
209 LIST_ADDTAIL(list, &slab->freeBuffers);  in pb_slab_buffer_destroy()
210 slab->numFree++;  in pb_slab_buffer_destroy()
212 if (slab->head.next == &slab->head)  in pb_slab_buffer_destroy()
213 LIST_ADDTAIL(&slab->head, &mgr->slabs);  in pb_slab_buffer_destroy()
216 if (slab->numFree == slab->numBuffers) {  in pb_slab_buffer_destroy()
217 list = &slab->head;  in pb_slab_buffer_destroy()
219 pb_reference(&slab->bo, NULL);  in pb_slab_buffer_destroy()
[all …]
56  struct pb_slab *slab = entry->slab;  in pb_slab_reclaim() local
59  LIST_ADD(&entry->head, &slab->free);  in pb_slab_reclaim()
60  slab->num_free++;  in pb_slab_reclaim()
63  if (!slab->head.next) {  in pb_slab_reclaim()
65  LIST_ADDTAIL(&slab->head, &group->slabs);  in pb_slab_reclaim()
68  if (slab->num_free >= slab->num_entries) {  in pb_slab_reclaim()
69  LIST_DEL(&slab->head);  in pb_slab_reclaim()
70  slabs->slab_free(slabs->priv, slab);  in pb_slab_reclaim()
103 struct pb_slab *slab;  in pb_slab_alloc() local
123 slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);  in pb_slab_alloc()
[all …]
63 struct pb_slab *slab; /* the slab that contains this buffer */ member
81  for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {  in radeon_bo_is_busy()
82  if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {  in radeon_bo_is_busy()
86  radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);  in radeon_bo_is_busy()
88  memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],  in radeon_bo_is_busy()
89  (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));  in radeon_bo_is_busy()
90  bo->u.slab.num_fences -= num_idle;  in radeon_bo_is_busy()
111 while (bo->u.slab.num_fences) {  in radeon_bo_wait_idle()
113 radeon_bo_reference(&fence, bo->u.slab.fences[0]);  in radeon_bo_wait_idle()
120 if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {  in radeon_bo_wait_idle()
121 radeon_bo_reference(&bo->u.slab.fences[0], NULL);  in radeon_bo_wait_idle()
[all …]
307 real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real);  in radeon_lookup_or_add_slab_buffer()
331 item->u.slab.real_idx = real_idx;  in radeon_lookup_or_add_slab_buffer()
360 index = cs->csc->slab_buffers[index].u.slab.real_idx;  in radeon_drm_cs_add_buffer()
511 for (unsigned src = 0; src < bo->u.slab.num_fences; ++src) {  in radeon_bo_slab_fence()
512 if (bo->u.slab.fences[src]->num_cs_references) {  in radeon_bo_slab_fence()
513 bo->u.slab.fences[dst] = bo->u.slab.fences[src];  in radeon_bo_slab_fence()
516 radeon_bo_reference(&bo->u.slab.fences[src], NULL);  in radeon_bo_slab_fence()
519 bo->u.slab.num_fences = dst;  in radeon_bo_slab_fence()
522 if (bo->u.slab.num_fences >= bo->u.slab.max_fences) {  in radeon_bo_slab_fence()
523 unsigned new_max_fences = bo->u.slab.max_fences + 1;  in radeon_bo_slab_fence()
[all …]
57  } slab;  member
91  void radeon_bo_slab_free(void *priv, struct pb_slab *slab);
40  } slab;  member
123 index = cs->csc->slab_buffers[index].u.slab.real_idx;  in radeon_bo_is_referenced_by_cs_for_write()
296 real = bo->u.slab.real;  in amdgpu_bo_map()
326 real = bo->bo ? bo : bo->u.slab.real;  in amdgpu_bo_unmap()
459 bo = container_of(entry, bo, u.slab.entry);  in amdgpu_bo_can_reclaim_slab()
470 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);  in amdgpu_bo_slab_destroy()
483 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);  in amdgpu_bo_slab_alloc() local
488 if (!slab)  in amdgpu_bo_slab_alloc()
509 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,  in amdgpu_bo_slab_alloc()
512 if (!slab->buffer)  in amdgpu_bo_slab_alloc()
515 assert(slab->buffer->bo);  in amdgpu_bo_slab_alloc()
517 slab->base.num_entries = slab->buffer->base.size / entry_size;  in amdgpu_bo_slab_alloc()
[all …]
55  } slab;  member
98  void amdgpu_bo_slab_free(void *priv, struct pb_slab *slab);
107 struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)  in amdgpu_slab() argument
109 return (struct amdgpu_slab *)slab;  in amdgpu_slab()
54 } slab; member
387 real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);  in amdgpu_lookup_or_add_slab_buffer()
414 buffer->u.slab.real_idx = real_idx;  in amdgpu_lookup_or_add_slab_buffer()
448 index = buffer->u.slab.real_idx;  in amdgpu_cs_add_buffer()
66  this cheap, the Cfg includes a slab allocator from which these objects are
70  providing the container with an allocator that uses the Cfg-local slab
72  store a pointer to the slab allocator in thread-local storage (TLS). This is
91  This requires maintaining the proper slab allocator pointer in TLS.
94  slab allocator into its own TLS. This is used as the Cfg is built within the
99  When the translation thread grabs a new Cfg pointer, it installs the Cfg's slab
101 assembly buffer, it must take care not to use the Cfg's slab allocator. If
102 there is a slab allocator for the assembler buffer, a pointer to it can also be
106 the Cfg's slab allocator, and clears the allocator pointer from its TLS.
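The excerpt above describes a per-Cfg slab allocator whose pointer is installed in the worker thread's TLS while that Cfg is being built or translated, and cleared afterwards. A minimal sketch of that pattern is given below; the names (slab_allocator, cfg_local_alloc, translate_one_cfg) and the trivial bump allocator are hypothetical illustrations, not the project's actual interfaces.

    /* Hypothetical sketch of a TLS-based Cfg-local slab allocator. */
    #include <assert.h>
    #include <stddef.h>

    struct slab_allocator { char *base, *cur, *end; };   /* trivial bump allocator */
    struct cfg            { struct slab_allocator slab; };

    /* Each worker thread points this at the slab of the Cfg it currently owns. */
    static _Thread_local struct slab_allocator *tls_slab;

    static void *cfg_local_alloc(size_t size)
    {
       /* Allocate from whichever Cfg's slab is installed in this thread's TLS. */
       assert(tls_slab && tls_slab->cur + size <= tls_slab->end);
       void *p = tls_slab->cur;
       tls_slab->cur += size;
       return p;
    }

    static void translate_one_cfg(struct cfg *cfg)
    {
       tls_slab = &cfg->slab;   /* install the Cfg's allocator in TLS */
       /* ... build and lower the Cfg; node/container allocations go
        *     through cfg_local_alloc() and land in cfg->slab ... */
       tls_slab = NULL;         /* clear before touching the assembler buffer,
                                 * which must not use the Cfg's slab */
    }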
34  slab.c \
35  slab.h \
64  memory_spread_slab, which is used to control the allocation of slab objects.
65  Because many factors affect the result of the slab spread test, it
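For context, memory_spread_slab is a cpuset flag: writing 1 spreads kernel slab allocations across the cpuset's allowed memory nodes instead of placing them on the local node. A small hedged example follows; the cgroup-v1 mount point and group name are assumptions and vary between systems.

    /* Enable slab spreading for one cpuset (paths are assumptions). */
    #include <stdio.h>

    int main(void)
    {
       const char *path =
          "/sys/fs/cgroup/cpuset/testgroup/cpuset.memory_spread_slab";
       FILE *f = fopen(path, "w");
       if (!f) { perror(path); return 1; }
       fputs("1\n", f);   /* 1 = spread slab allocations across allowed nodes */
       return fclose(f) ? 1 : 0;
    }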
8 Check for leaks of vfsmount structs by diffing vfs slab cache obj numbers
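A sketch of that leak check is shown below: sample the active-object count of the vfsmount slab cache from /proc/slabinfo before and after the workload and compare. The cache name "mnt_cache" and the sampling helper are assumptions for illustration, not the test's actual code.

    /* Hedged sketch: diff slab cache object counts to spot vfsmount leaks. */
    #include <stdio.h>
    #include <string.h>

    static long slab_active_objs(const char *cache)
    {
       FILE *f = fopen("/proc/slabinfo", "r");   /* needs root */
       char line[512];
       long active = -1;

       if (!f)
          return -1;
       while (fgets(line, sizeof(line), f)) {
          char name[64];
          unsigned long objs;
          /* /proc/slabinfo rows: name <active_objs> <num_objs> <objsize> ... */
          if (sscanf(line, "%63s %lu", name, &objs) == 2 && !strcmp(name, cache)) {
             active = (long)objs;
             break;
          }
       }
       fclose(f);
       return active;
    }

    int main(void)
    {
       long before = slab_active_objs("mnt_cache");   /* assumed cache name */
       /* ... run the mount/umount workload here ... */
       long after = slab_active_objs("mnt_cache");
       printf("mnt_cache active objs: before=%ld after=%ld\n", before, after);
       return 0;
    }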
5 # "mm_struct slab leak (affected only some architectures)"
347 int slab = 0; member
583 res->SetInteger("slab", slab);  in ToValue()
653 target = &meminfo->slab;  in ParseProcMeminfo()
229 EXPECT_EQ(meminfo.slab, 54212); in TEST_F()
26 /// AllocateRWX - Allocate a slab of memory with read/write/execute
169 /// AllocateRWX - Allocate a slab of memory with read/write/execute
174 /// AllocateRWX - Allocate a slab of memory with read/write/execute
1   Index: linux/arch/i386/Kconfig.debug
2   ====== ...
3593 碪>'[stone slab used for washing clot]';