/**************************************************************************
 *
 * Copyright 2006-2008 VMware, Inc., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   /** Entry in pb_slab::freeBuffers while the buffer is free */
   struct list_head head;

   /** Number of outstanding maps */
   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   /** Entry in pb_slab_manager::slabs while the slab is partially full */
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   /** Persistent CPU mapping of bo */
   void *virtual;
};


/**
 * Slab manager.
 *
 * Adds/removes slabs as needed to satisfy the allocation/destruction
 * of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand out downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment and usage to be used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   mtx_t mutex;
};
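
/*
 * Sizing sketch (illustrative numbers, not taken from any driver): with
 * slabSize = 64 * 1024 and bufSize = 4096, each slab obtained from the
 * provider is diced into slab->bo->size / bufSize = 16 equally sized
 * sub-allocations (see pb_slab_create below).
 */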


/**
 * Wrapper around several slab managers, therefore capable of handling
 * buffers of multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};
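
/*
 * Bucket layout sketch (hypothetical numbers): with minBufSize = 4096 and
 * maxBufSize = 65536, the constructor below creates numBuckets = 5 slab
 * managers, with bucket sizes 4096, 8192, 16384, 32768 and 65536. A request
 * of, say, 5000 bytes is dispatched to the 8192 bucket, while anything
 * above 65536 bytes goes straight to the provider.
 */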


static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Move a buffer back onto its slab's free list, and free the slab itself
 * once all of its buffers are free.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   mtx_lock(&mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.reference));

   buf->mapCount = 0;

   list_del(list);
   list_addtail(list, &slab->freeBuffers);
   slab->numFree++;

   /* A full slab is not on the partial list (its head points to itself),
    * so re-add it now that it has a free buffer again */
   if (slab->head.next == &slab->head)
      list_addtail(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      list_delinit(list);
      pb_unmap(slab->bo);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   mtx_unlock(&mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   enum pb_usage_flags flags,
                   void *flush_ctx)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        enum pb_usage_flags flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
   pb_slab_buffer_destroy,
   pb_slab_buffer_map,
   pb_slab_buffer_unmap,
   pb_slab_buffer_validate,
   pb_slab_buffer_fence,
   pb_slab_buffer_get_base_buffer
};
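
/*
 * Note: consumers never call these functions directly; the pb_* inline
 * helpers in pb_buffer.h (pb_map, pb_unmap, pb_destroy, ...) dispatch
 * through this vtbl, so a slab sub-allocation behaves like any other
 * pb_buffer.
 */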


/**
 * Create a new slab.
 *
 * Called when we run out of partial slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Record the slab's virtual address. All sub-allocation mappings are
    * serviced directly through this address, so the buffer must remain
    * persistently mapped. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE |
                          PB_USAGE_PERSISTENT, NULL);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   numBuffers = slab->bo->size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   list_inithead(&slab->head);
   list_inithead(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   /* Carve the slab up into equally sized, initially free sub-allocations */
   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      pipe_reference_init(&buf->base.reference, 0);
      buf->base.size = mgr->bufSize;
      buf->base.alignment = 0;
      buf->base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      list_addtail(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   list_addtail(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if (!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if (!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if (!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   mtx_lock(&mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         mtx_unlock(&mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      list_delinit(list);

   list = slab->freeBuffers.next;
   list_delinit(list);

   mtx_unlock(&mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   list_inithead(&mgr->slabs);

   (void) mtx_init(&mgr->mutex, mtx_plain);

   return &mgr->base;
}
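
/*
 * Usage sketch (a minimal example, not from any driver; "provider" may be
 * any pb_manager, e.g. the malloc-backed one declared in pb_bufmgr.h):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;
 *
 *    slab_mgr = pb_slab_manager_create(provider,
 *                                      4096,       // bufSize handed out
 *                                      64 * 1024,  // slabSize requested
 *                                      &desc);
 *    buf = slab_mgr->create_buffer(slab_mgr, 4096, &desc);
 *    ...
 *    pb_reference(&buf, NULL);  // drops the ref, returns buf to its slab
 *    slab_mgr->destroy(slab_mgr);
 */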


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   pb_size reqSize = size;
   unsigned i;

   /* The allocation must also satisfy the alignment, so size the request
    * for whichever is larger */
   if (desc->alignment > reqSize)
      reqSize = desc->alignment;

   /* Dispatch to the first bucket large enough for the request */
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= reqSize)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slabs don't hold any temporary buffers so no need to call them */

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* One bucket per power-of-two size from minBufSize up to maxBufSize */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if (mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
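
/*
 * Usage sketch (illustrative, mirroring the example after
 * pb_slab_manager_create above):
 *
 *    struct pb_manager *range_mgr;
 *
 *    range_mgr = pb_slab_range_manager_create(provider,
 *                                             4096,        // minBufSize
 *                                             64 * 1024,   // maxBufSize
 *                                             512 * 1024,  // slabSize
 *                                             &desc);
 *    buf = range_mgr->create_buffer(range_mgr, 5000, &desc); // 8 KiB bucket
 *    // Requests larger than 64 KiB bypass the slabs and go to the provider.
 */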