/**************************************************************************
 *
 * Copyright 2006-2008 VMware, Inc., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * Slab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
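
/*
 * Two managers are implemented here.  pb_slab_manager hands out fixed-size
 * buffers sub-allocated from larger "slab" buffers obtained from a provider
 * manager.  pb_slab_range_manager layers several slab managers, one per
 * buffer-size bucket, dispatching each request to the smallest bucket that
 * fits and falling back to the provider for larger requests.
 */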

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;

   /** Used when validating, to signal that all mappings are finished. */
   /* TODO: Actually validation does not reach this stage yet */
   cnd_t event;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};


/**
 * Manager of fixed-size buffers sub-allocated from slabs.
 *
 * It adds and removes slabs as needed to satisfy the allocation and
 * destruction of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand out downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment and usage with which the slab buffers are allocated.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs.
    *
    * Full slabs are not stored in any list.  Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   mtx_t mutex;
};


/**
 * Wrapper around several slab managers, therefore capable of handling buffers
 * of multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};


static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Remove a buffer from whichever list it is on and put it back on its slab's
 * free list, destroying the slab once all of its buffers are free.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   mtx_lock(&mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.reference));

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

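   /* If the slab was full it is not on the manager's list of partial slabs;
    * now that one of its buffers is free again, put it back on that list. */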
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   mtx_unlock(&mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags,
                   void *flush_ctx)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
      cnd_broadcast(&buf->event);
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_validate,
      pb_slab_buffer_fence,
      pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we run out of partial slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

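   /* The provider may have allocated more than slabSize bytes, so carve the
    * actual buffer size into as many fixed-size sub-buffers as will fit. */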
   numBuffers = slab->bo->size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
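      /* Sub-buffers start with a zero reference count; a reference is only
       * taken when pb_slab_manager_create_buffer() hands one out. */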
      pipe_reference_init(&buf->base.reference, 0);
      buf->base.size = mgr->bufSize;
      buf->base.alignment = 0;
      buf->base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      cnd_init(&buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if (!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if (!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if (!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   mtx_lock(&mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         mtx_unlock(&mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   mtx_unlock(&mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   (void) mtx_init(&mgr->mutex, mtx_plain);

   return &mgr->base;
}
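
/*
 * Typical use, as a minimal sketch only: the "provider" pointer and the sizes
 * below are illustrative assumptions, not values required by this code.
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;
 *
 *    // Carve 4 KiB buffers out of 1 MiB slabs obtained from the provider.
 *    slab_mgr = pb_slab_manager_create(provider, 4 * 1024, 1024 * 1024, &desc);
 *    if (slab_mgr)
 *       buf = slab_mgr->create_buffer(slab_mgr, 4 * 1024, &desc);
 */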


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   pb_size reqSize = size;
   unsigned i;

   if (desc->alignment > reqSize)
      reqSize = desc->alignment;

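   /* Walk the buckets from smallest to largest and hand the request to the
    * first slab manager whose buffer size covers it. */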
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= reqSize)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocating a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* The individual slab managers don't hold any temporary buffers, so there
    * is no need to flush them. */

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

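   /* One bucket per doubling of the buffer size, starting at minBufSize and
    * stopping at the first size that reaches or exceeds maxBufSize. */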
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if (mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
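
/*
 * Typical use, as a minimal sketch only: "provider", "desc", and the size
 * range below are illustrative assumptions, not values required by this code.
 *
 *    // Buckets of 4 KiB, 8 KiB, ..., 64 KiB buffers, each sub-allocated from
 *    // 1 MiB slabs; requests above 64 KiB go straight to the provider.
 *    struct pb_manager *range_mgr =
 *       pb_slab_range_manager_create(provider, 4 * 1024, 64 * 1024,
 *                                    1024 * 1024, &desc);
 */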