/**************************************************************************
 *
 * Copyright 2006-2008 VMware, Inc., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thellstom-at-vmware-dot-com>
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;

/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;

   /** Used when validating, to signal that all mappings are finished */
   /* TODO: Actually validation does not reach this stage yet */
   pipe_condvar event;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};

/**
 * Slab manager for buffers of a single size.
 *
 * It adds/removes slabs as needed in order to meet the allocation/destruction
 * of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand out downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment and usage to be used when allocating the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   pipe_mutex mutex;
};


/**
 * Wrapper around several slab managers, therefore capable of handling buffers
 * of multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};


static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Delete a buffer from the slab delayed list and put
 * it on the slab free list.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   pipe_mutex_lock(mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.reference));

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

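   /* A full slab is not kept on any list; now that one of its buffers is
    * free again, put the slab back on the manager's partial-slab list. */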
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags,
                   void *flush_ctx)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

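   /* Mapping a sub-buffer simply returns a pointer into the slab's virtual
    * address recorded in pb_slab_create(); the underlying buffer must
    * therefore stay pinned. */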
   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
      pipe_condvar_broadcast(buf->event);
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_validate,
      pb_slab_buffer_fence,
      pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we ran out of free slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

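   /* Carve the provider buffer into bufSize-sized sub-buffers; the actual
    * buffer size is used, which may exceed the requested slabSize. */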
   numBuffers = slab->bo->size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

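   /* Initialize each sub-buffer with a zero reference count and put it on
    * the slab's free list. */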
   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      pipe_reference_init(&buf->base.reference, 0);
      buf->base.size = mgr->bufSize;
      buf->base.alignment = 0;
      buf->base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      pipe_condvar_init(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if(size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if(!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   pipe_mutex_lock(mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         pipe_mutex_unlock(mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   pipe_mutex_unlock(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
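
/*
 * Usage sketch (illustrative only; the provider pointer and the sizes below
 * are hypothetical and would come from the winsys):
 *
 *    struct pb_desc desc;
 *    memset(&desc, 0, sizeof(desc));
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;
 *
 *    // Hand out 16 KiB sub-buffers carved from 1 MiB provider buffers.
 *    struct pb_manager *slab_mgr =
 *       pb_slab_manager_create(provider, 16 * 1024, 1024 * 1024, &desc);
 */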


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   pb_size reqSize = size;
   unsigned i;

   if(desc->alignment > reqSize)
      reqSize = desc->alignment;

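   /* Find the smallest bucket that can hold the request; bucket i serves
    * buffers of size minBufSize * 2^i. */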
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= reqSize)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slab managers don't hold any temporary buffers, so there is
    * no need to flush them. */

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}

struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

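   /* Create one bucket per power-of-two buffer size, from minBufSize up to
    * the first size that is >= maxBufSize. */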
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
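
/*
 * Usage sketch (illustrative only; the provider pointer, descriptor and
 * sizes below are hypothetical): requests up to 64 KiB are served from
 * power-of-two slab buckets (4 KiB to 64 KiB) carved out of 1 MiB provider
 * allocations, while larger requests go straight to the provider.
 *
 *    struct pb_manager *range_mgr =
 *       pb_slab_range_manager_create(provider, 4 * 1024, 64 * 1024,
 *                                    1024 * 1024, &desc);
 */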