• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**************************************************************************
2  *
3  * Copyright 2006 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 /**
29  * \file
30  * Buffer manager using the old texture memory manager.
31  *
32  * \author Jose Fonseca <jfonseca@vmware.com>
33  */
34 
35 
#include <limits.h>

#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"
#include "util/u_mm.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
44 
45 
/**
 * Convenience macro (type safe): cast a derived object to a pointer to
 * its embedded base struct (the member must be named \c base).
 */
#define SUPER(__derived) (&(__derived)->base)
50 
51 
/**
 * Buffer manager that sub-allocates ranges of one underlying pb_buffer
 * using the old texture memory manager (u_mm).
 */
struct mm_pb_manager
{
   struct pb_manager base;

   pipe_mutex mutex;          /* guards heap alloc/free across buffers */

   pb_size size;              /* total managed size, in bytes */
   struct mem_block *heap;    /* u_mm heap tracking sub-allocations */

   pb_size align2;            /* log2 of the allocation alignment */

   struct pb_buffer *buffer;  /* underlying buffer being sub-allocated */
   void *map;                 /* persistent CPU mapping of \c buffer */
};
66 
67 
/** Downcast the generic manager interface to our implementation. */
static inline struct mm_pb_manager *
mm_pb_manager(struct pb_manager *mgr)
{
   assert(mgr);
   /* Safe: mm_pb_manager embeds pb_manager as its first member. */
   struct mm_pb_manager *mm = (struct mm_pb_manager *)mgr;
   return mm;
}
74 
75 
/**
 * A sub-allocated buffer: a pb_buffer plus the u_mm block that records
 * its range inside the parent manager's buffer.
 */
struct mm_buffer
{
   struct pb_buffer base;

   struct mm_pb_manager *mgr;   /* owning manager */

   struct mem_block *block;     /* allocated range within mgr->heap */
};
84 
85 
/** Downcast the generic buffer interface to our implementation. */
static inline struct mm_buffer *
mm_buffer(struct pb_buffer *buf)
{
   assert(buf);
   /* Safe: mm_buffer embeds pb_buffer as its first member. */
   struct mm_buffer *mm_buf = (struct mm_buffer *)buf;
   return mm_buf;
}
92 
93 
/**
 * pb_buffer::destroy callback: return the block to the heap and free
 * the wrapper. Only called once the reference count reaches zero.
 */
static void
mm_buffer_destroy(struct pb_buffer *buf)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;

   assert(!pipe_is_referenced(&mm_buf->base.reference));

   /* The heap is shared by all buffers of this manager, so u_mmFreeMem
    * must run under the manager mutex. */
   pipe_mutex_lock(mm->mutex);
   u_mmFreeMem(mm_buf->block);
   FREE(mm_buf);
   pipe_mutex_unlock(mm->mutex);
}
107 
108 
/**
 * pb_buffer::map callback: the parent buffer stays persistently mapped
 * (see mm_bufmgr_create_from_buffer), so mapping a sub-buffer is just
 * pointer arithmetic into that mapping.  \p flags and \p flush_ctx are
 * currently ignored.
 */
static void *
mm_buffer_map(struct pb_buffer *buf,
              unsigned flags,
              void *flush_ctx)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   return (unsigned char *) mm->map + mm_buf->block->ofs;
}
121 
122 
/**
 * pb_buffer::unmap callback.  Nothing to do: the parent mapping is
 * persistent and shared by all sub-buffers.
 */
static void
mm_buffer_unmap(struct pb_buffer *buf)
{
   /* No-op */
}
128 
129 
130 static enum pipe_error
mm_buffer_validate(struct pb_buffer * buf,struct pb_validate * vl,unsigned flags)131 mm_buffer_validate(struct pb_buffer *buf,
132                    struct pb_validate *vl,
133                    unsigned flags)
134 {
135    struct mm_buffer *mm_buf = mm_buffer(buf);
136    struct mm_pb_manager *mm = mm_buf->mgr;
137    return pb_validate(mm->buffer, vl, flags);
138 }
139 
140 
141 static void
mm_buffer_fence(struct pb_buffer * buf,struct pipe_fence_handle * fence)142 mm_buffer_fence(struct pb_buffer *buf,
143                 struct pipe_fence_handle *fence)
144 {
145    struct mm_buffer *mm_buf = mm_buffer(buf);
146    struct mm_pb_manager *mm = mm_buf->mgr;
147    pb_fence(mm->buffer, fence);
148 }
149 
150 
/**
 * pb_buffer::get_base_buffer callback: report the parent's base buffer
 * and add this sub-block's offset on top of the parent's own offset.
 */
static void
mm_buffer_get_base_buffer(struct pb_buffer *buf,
                          struct pb_buffer **base_buf,
                          pb_size *offset)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;
   pb_get_base_buffer(mm->buffer, base_buf, offset);
   *offset += mm_buf->block->ofs;
}
161 
162 
/* Virtual function table for sub-allocated buffers; entries are in
 * struct pb_vtbl declaration order. */
static const struct pb_vtbl
mm_buffer_vtbl = {
      mm_buffer_destroy,
      mm_buffer_map,
      mm_buffer_unmap,
      mm_buffer_validate,
      mm_buffer_fence,
      mm_buffer_get_base_buffer
};
172 
173 
174 static struct pb_buffer *
mm_bufmgr_create_buffer(struct pb_manager * mgr,pb_size size,const struct pb_desc * desc)175 mm_bufmgr_create_buffer(struct pb_manager *mgr,
176                         pb_size size,
177                         const struct pb_desc *desc)
178 {
179    struct mm_pb_manager *mm = mm_pb_manager(mgr);
180    struct mm_buffer *mm_buf;
181 
182    /* We don't handle alignments larger then the one initially setup */
183    assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2));
184    if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
185       return NULL;
186 
187    pipe_mutex_lock(mm->mutex);
188 
189    mm_buf = CALLOC_STRUCT(mm_buffer);
190    if (!mm_buf) {
191       pipe_mutex_unlock(mm->mutex);
192       return NULL;
193    }
194 
195    pipe_reference_init(&mm_buf->base.reference, 1);
196    mm_buf->base.alignment = desc->alignment;
197    mm_buf->base.usage = desc->usage;
198    mm_buf->base.size = size;
199 
200    mm_buf->base.vtbl = &mm_buffer_vtbl;
201 
202    mm_buf->mgr = mm;
203 
204    mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0);
205    if(!mm_buf->block) {
206 #if 0
207       debug_printf("warning: heap full\n");
208       mmDumpMemInfo(mm->heap);
209 #endif
210       FREE(mm_buf);
211       pipe_mutex_unlock(mm->mutex);
212       return NULL;
213    }
214 
215    /* Some sanity checks */
216    assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
217    assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
218 
219    pipe_mutex_unlock(mm->mutex);
220    return SUPER(mm_buf);
221 }
222 
223 
/**
 * pb_manager::flush callback.  Nothing to flush: allocations are
 * serviced immediately out of the local heap.
 */
static void
mm_bufmgr_flush(struct pb_manager *mgr)
{
   /* No-op */
}
229 
230 
231 static void
mm_bufmgr_destroy(struct pb_manager * mgr)232 mm_bufmgr_destroy(struct pb_manager *mgr)
233 {
234    struct mm_pb_manager *mm = mm_pb_manager(mgr);
235 
236    pipe_mutex_lock(mm->mutex);
237 
238    u_mmDestroy(mm->heap);
239 
240    pb_unmap(mm->buffer);
241    pb_reference(&mm->buffer, NULL);
242 
243    pipe_mutex_unlock(mm->mutex);
244 
245    FREE(mgr);
246 }
247 
248 
249 struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer * buffer,pb_size size,pb_size align2)250 mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
251                              pb_size size, pb_size align2)
252 {
253    struct mm_pb_manager *mm;
254 
255    if (!buffer)
256       return NULL;
257 
258    mm = CALLOC_STRUCT(mm_pb_manager);
259    if (!mm)
260       return NULL;
261 
262    mm->base.destroy = mm_bufmgr_destroy;
263    mm->base.create_buffer = mm_bufmgr_create_buffer;
264    mm->base.flush = mm_bufmgr_flush;
265 
266    mm->size = size;
267    mm->align2 = align2; /* 64-byte alignment */
268 
269    pipe_mutex_init(mm->mutex);
270 
271    mm->buffer = buffer;
272 
273    mm->map = pb_map(mm->buffer,
274 		    PB_USAGE_CPU_READ |
275 		    PB_USAGE_CPU_WRITE, NULL);
276    if(!mm->map)
277       goto failure;
278 
279    mm->heap = u_mmInit(0, (int)size);
280    if (!mm->heap)
281       goto failure;
282 
283    return SUPER(mm);
284 
285 failure:
286    if(mm->heap)
287       u_mmDestroy(mm->heap);
288    if(mm->map)
289       pb_unmap(mm->buffer);
290    FREE(mm);
291    return NULL;
292 }
293 
294 
295 struct pb_manager *
mm_bufmgr_create(struct pb_manager * provider,pb_size size,pb_size align2)296 mm_bufmgr_create(struct pb_manager *provider,
297                  pb_size size, pb_size align2)
298 {
299    struct pb_buffer *buffer;
300    struct pb_manager *mgr;
301    struct pb_desc desc;
302 
303    if (!provider)
304       return NULL;
305 
306    memset(&desc, 0, sizeof(desc));
307    desc.alignment = 1 << align2;
308 
309    buffer = provider->create_buffer(provider, size, &desc);
310    if (!buffer)
311       return NULL;
312 
313    mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
314    if (!mgr) {
315       pb_reference(&buffer, NULL);
316       return NULL;
317    }
318 
319   return mgr;
320 }
321