/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifdef DEBUG
#define SET_MAGIC(element, value) (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifdef DEBUG
   intptr_t magic;
#endif
};
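
/* How the owner encoding above is read back (a minimal sketch; it mirrors
 * what slab_free() and slab_free_orphaned() below actually do):
 *
 *    intptr_t owner = p_atomic_read(&elt->owner);
 *    if (owner & 1) {
 *       // The owning child pool is gone; only the orphaned page remains.
 *       struct slab_page_header *page =
 *          (struct slab_page_header *)(owner & ~(intptr_t)1);
 *    } else {
 *       // The element still belongs to a live child pool.
 *       struct slab_child_pool *pool = (struct slab_child_pool *)owner;
 *    }
 */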

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};
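
/* Given the sizes set up in slab_create_parent() below, a page allocated by
 * slab_add_new_page() looks like this in memory (a descriptive sketch, not a
 * declaration):
 *
 *    [struct slab_page_header]
 *    [struct slab_element_header | user data]   element 0
 *    [struct slab_element_header | user data]   element 1
 *    ...                                        (num_elements in total)
 *
 * Each slot is element_size bytes; slab_get_element() indexes into this
 * layout and slab_alloc() hands out the "user data" part (&elt[1]).
 */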


static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size  Size of one object.
 * \param num_items  Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
                                sizeof(intptr_t));
   parent->num_elements = num_items;
}

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
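
/* Typical multi-threaded use of the parent/child API (a sketch, not part of
 * this file; "struct my_cmd" and the per-thread setup are hypothetical):
 *
 *    static struct slab_parent_pool parent;
 *
 *    // once, at startup:
 *    slab_create_parent(&parent, sizeof(struct my_cmd), 64);
 *
 *    // in each thread:
 *    struct slab_child_pool child;
 *    slab_create_child(&child, &parent);
 *    struct my_cmd *cmd = slab_alloc(&child);
 *    ...
 *    slab_free(&child, cmd);      // any child of the same parent may free it
 *    slab_destroy_child(&child);
 *
 *    // once, at shutdown, after all children are destroyed:
 *    slab_destroy_parent(&parent);
 */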

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * once all objects in them have been freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as both pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: the caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      mtx_unlock(&pool->parent->mutex);
   } else {
      mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
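
/* Example of the cross-pool case handled above (a sketch; "a" and "b" are
 * hypothetical child pools of the same parent, each used by its own thread):
 *
 *    void *obj = slab_alloc(&a);   // on thread A
 *    ...
 *    slab_free(&b, obj);           // on thread B: obj is pushed onto
 *                                  // a.migrated under the parent mutex
 *    ...
 *    slab_alloc(&a);               // thread A picks up migrated objects the
 *                                  // next time its free list runs empty
 */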

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *pool)
{
   return slab_alloc(&pool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *pool, void *ptr)
{
   slab_free(&pool->child, ptr);
}

void
slab_destroy(struct slab_mempool *pool)
{
   slab_destroy_child(&pool->child);
   slab_destroy_parent(&pool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size  Size of one object.
 * \param num_items  Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *pool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&pool->parent, item_size, num_items);
   slab_create_child(&pool->child, &pool->parent);
}
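
/* Typical single-threaded use of the slab_mempool convenience wrapper
 * (a sketch; "struct my_item" is hypothetical):
 *
 *    struct slab_mempool pool;
 *    slab_create(&pool, sizeof(struct my_item), 64);
 *
 *    struct my_item *item = slab_alloc_st(&pool);
 *    ...
 *    slab_free_st(&pool, item);
 *
 *    slab_destroy(&pool);
 */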