/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifdef DEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifdef DEBUG
   intptr_t magic;
#endif
};

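/* Illustration of the owner encoding described above (a sketch, not part of
 * the implementation): while the owning child pool is alive, the field holds
 * the pool pointer directly; once the pool is destroyed, the element carries
 * a tagged pointer to its page instead:
 *
 *    elt->owner = (intptr_t)pool;         live child pool, LSB is 0
 *    elt->owner = (intptr_t)page | 1;     orphaned page, LSB is 1
 *
 * slab_add_new_page() and slab_destroy_child() below set these values.
 */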

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the elements stored in
    * the page, so the allocated size is always larger than this structure.
    */
};


static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}

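/* For reference, the layout produced by slab_add_new_page() and indexed by
 * slab_get_element() looks roughly like this (a sketch, not code):
 *
 *    +------------------+-----------+-----------+-----+---------------+
 *    | slab_page_header | element 0 | element 1 | ... | element N-1   |
 *    +------------------+-----------+-----------+-----+---------------+
 *    ^page              ^&page[1]
 *
 * Each element occupies parent->element_size bytes and starts with a
 * slab_element_header, followed by the user-visible object.
 */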

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
                                sizeof(intptr_t));
   parent->num_elements = num_items;
}

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}

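/* Typical usage of the parent/child API (an illustrative sketch, not part of
 * this file; "struct my_object" and the item count are made up):
 *
 *    struct slab_parent_pool parent;
 *    struct slab_child_pool pool;        // one child pool per thread
 *
 *    slab_create_parent(&parent, sizeof(struct my_object), 64);
 *    slab_create_child(&pool, &parent);
 *
 *    struct my_object *obj = slab_alloc(&pool);
 *    if (obj) {
 *       ... use obj ...
 *       slab_free(&pool, obj);   // may also be freed via another child pool
 *    }
 *
 *    slab_destroy_child(&pool);
 *    slab_destroy_parent(&parent);
 */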

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * once all objects in them have been freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      mtx_unlock(&pool->parent->mutex);
   } else {
      mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}

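/* Sketch of the cross-pool case handled above (illustrative only; pool_a,
 * pool_b and the producer/consumer roles are made-up names): an object
 * allocated from one child pool may be freed through another child of the
 * same parent, e.g.
 *
 *    // thread A                       // thread B
 *    obj = slab_alloc(&pool_a);        ... receives obj from thread A ...
 *    ... hand obj to thread B ...      slab_free(&pool_b, obj);
 *
 * The element then lands on pool_a's "migrated" list (or the page is freed
 * if pool_a was already destroyed), protected by the parent's mutex.
 */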

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *pool)
{
   return slab_alloc(&pool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *pool, void *ptr)
{
   slab_free(&pool->child, ptr);
}

void
slab_destroy(struct slab_mempool *pool)
{
   slab_destroy_child(&pool->child);
   slab_destroy_parent(&pool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *pool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&pool->parent, item_size, num_items);
   slab_create_child(&pool->child, &pool->parent);
}

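/* Minimal usage sketch for the single-threaded convenience wrapper above
 * (illustrative; "struct my_object" and the item count are made up):
 *
 *    struct slab_mempool pool;
 *
 *    slab_create(&pool, sizeof(struct my_object), 32);
 *
 *    struct my_object *obj = slab_alloc_st(&pool);
 *    if (obj)
 *       slab_free_st(&pool, obj);
 *
 *    slab_destroy(&pool);
 */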