Lines matching refs: pool, grouped by function:

void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}

void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* never created or already destroyed */

   mtx_lock(&pool->parent->mutex);

   /* Orphan the pages this child pool still owns. */
   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);
      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   /* Release elements that other child pools freed back to us. */
   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }
   pool->parent = NULL;
}

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);
   if (!page)
      return false;
   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool; /* low bit clear: owned by a live child pool */
      elt->next = pool->free;
      pool->free = elt;
   }

   page->u.next = pool->pages;
   pool->pages = page;
   return true;
}
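
Both element loops above go through slab_get_element(), which does not reference `pool` and therefore is not among the matched lines. A minimal sketch of such a helper, assuming elements are packed immediately after the page header at element_size strides (which is what the malloc size computation above implies) and that <stdint.h> is available, might look like:

static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   /* Element i lives index * element_size bytes past the page header. */
   return (struct slab_element_header *)
          ((uint8_t *)&page[1] + parent->element_size * index);
}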

void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;
   if (!pool->free) {
      /* Reclaim elements other child pools freed back to us, or get a new page. */
      mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      mtx_unlock(&pool->parent->mutex);
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;
   return &elt[1]; /* user data starts right after the element header */
}

void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header *)ptr - 1);
   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* Fast path: the element belongs to this child pool. */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* Slow path: hand the element back under the parent's mutex. */
   mtx_lock(&pool->parent->mutex);
   /* ... re-read elt->owner, then migrate it to its owner or orphan-free it ... */
   mtx_unlock(&pool->parent->mutex);
}
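
For context, a small usage sketch of the parent/child slab API implied by these functions: one shared slab_parent_pool, one slab_child_pool per thread, allocations served from the calling thread's child pool, and frees allowed from any child pool (which is what the migrated list exists for). The slab_create_parent()/slab_destroy_parent() calls, their parameters, and the transfer struct are assumptions for illustration, not part of the matched lines:

/* Hypothetical object type allocated from the slab. */
struct transfer {
   int level;
   void *data;
};

static struct slab_parent_pool parent;
static struct slab_child_pool child; /* one per thread in practice */

void example(void)
{
   /* Assumed signature: element size and number of elements per page. */
   slab_create_parent(&parent, sizeof(struct transfer), 64);
   slab_create_child(&child, &parent);

   struct transfer *xfer = slab_alloc(&child);
   if (xfer) {
      xfer->level = 0;
      /* ... use the object; it may later be freed from any child pool ... */
      slab_free(&child, xfer);
   }

   slab_destroy_child(&child);
   slab_destroy_parent(&parent);
}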