/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifndef NDEBUG
   intptr_t magic;
#endif
};

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};

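/* Layout sketch of one page, as built by slab_add_new_page() and indexed by
 * slab_get_element(): the slab_page_header is immediately followed by
 * num_elements slots of element_size bytes, and each slot starts with a
 * slab_element_header followed by the (padded) user data:
 *
 *    [slab_page_header][hdr|data][hdr|data]...[hdr|data]
 *
 * slab_get_element() returns a pointer to the header of the index-th slot.
 */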
static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   simple_mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
                                    sizeof(intptr_t));
   parent->num_elements = num_items;
   parent->item_size = item_size;
}
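
/* Worked example of the element_size computation above, assuming a 64-bit
 * release build where sizeof(struct slab_element_header) == 16 and
 * sizeof(intptr_t) == 8 (assumptions, not guaranteed by this file):
 * item_size == 24 gives element_size == ALIGN_POT(16 + 24, 8) == 40, so a
 * page created for num_items == 64 carries 64 * 40 bytes of element storage
 * after its slab_page_header.
 */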

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   simple_mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
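
/* Illustrative usage sketch (not part of this file; "struct my_object" and
 * the variable names are hypothetical): one parent pool shared by several
 * threads, each thread owning its own child pool.
 *
 *    struct slab_parent_pool parent;
 *    struct slab_child_pool child;   // one instance per thread
 *
 *    slab_create_parent(&parent, sizeof(struct my_object), 64);
 *    slab_create_child(&child, &parent);
 *
 *    struct my_object *obj = slab_alloc(&child);
 *    ...
 *    slab_free(&child, obj);
 *
 *    slab_destroy_child(&child);
 *    slab_destroy_parent(&parent);
 */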

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool are orphaned. They are eventually freed
 * once all objects in them have been freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   simple_mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   simple_mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}

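/* Allocate one new page for the given child pool and push all of its elements
 * onto the pool's free list. Returns false if the page allocation fails.
 */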
static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      simple_mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      simple_mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}

/**
 * Same as slab_alloc, but memsets the returned object to 0.
 */
void *
slab_zalloc(struct slab_child_pool *pool)
{
   void *r = slab_alloc(pool);
   if (r)
      memset(r, 0, pool->parent->item_size);
   return r;
}

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   if (pool->parent)
      simple_mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);
   } else {
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
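
/* Cross-pool free sketch (illustrative only; child_a/child_b are hypothetical
 * names): an object allocated from one thread's child pool may be freed
 * through another thread's child pool of the same parent. The element then
 * lands on the owning pool's "migrated" list (or is released via
 * slab_free_orphaned() if the owner has already been destroyed) and is
 * reclaimed by the owner on its next slab_alloc().
 *
 *    // thread A                       // thread B
 *    void *obj = slab_alloc(&child_a);
 *    // ... hand obj over to B ...
 *                                      slab_free(&child_b, obj);
 */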

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *mempool)
{
   return slab_alloc(&mempool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *mempool, void *ptr)
{
   slab_free(&mempool->child, ptr);
}

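/**
 * Destroy the single-threaded slab allocator: the child pool first, then the
 * parent pool.
 */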
void
slab_destroy(struct slab_mempool *mempool)
{
   slab_destroy_child(&mempool->child);
   slab_destroy_parent(&mempool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *mempool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&mempool->parent, item_size, num_items);
   slab_create_child(&mempool->child, &mempool->parent);
}
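
/* Illustrative single-threaded usage of the slab_mempool convenience wrapper
 * ("struct my_object" and the variable names are hypothetical):
 *
 *    struct slab_mempool pool;
 *
 *    slab_create(&pool, sizeof(struct my_object), 128);
 *    struct my_object *obj = slab_alloc_st(&pool);
 *    ...
 *    slab_free_st(&pool, obj);
 *    slab_destroy(&pool);
 */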