• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**********************************************************
2  * Copyright 2009-2015 VMware, Inc.  All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person
5  * obtaining a copy of this software and associated documentation
6  * files (the "Software"), to deal in the Software without
7  * restriction, including without limitation the rights to use, copy,
8  * modify, merge, publish, distribute, sublicense, and/or sell copies
9  * of the Software, and to permit persons to whom the Software is
10  * furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  *
24  **********************************************************/
25 
26 /**
27  * @file
28  * SVGA buffer manager for Guest Memory Regions (GMRs).
29  *
30  * GMRs are used for pixel and vertex data upload/download to/from the virtual
31  * SVGA hardware. There is a limited number of GMRs available, and
32  * creating/destroying them is also a slow operation so we must suballocate
33  * them.
34  *
 * This file implements the pipebuffer library's buffer-manager interface, so
 * that we can use pipebuffer's suballocation, fencing, and debugging
 * facilities with GMRs.
37  *
38  * @author Jose Fonseca <jfonseca@vmware.com>
39  */
40 
41 
42 #include "svga_cmd.h"
43 
44 #include "util/u_inlines.h"
45 #include "util/u_memory.h"
46 #include "pipebuffer/pb_buffer.h"
47 #include "pipebuffer/pb_bufmgr.h"
48 
49 #include "svga_winsys.h"
50 
51 #include "vmw_screen.h"
52 #include "vmw_buffer.h"
53 
54 struct vmw_gmr_bufmgr;
55 
56 
/**
 * A winsys buffer backed by a kernel GMR region.
 */
struct vmw_gmr_buffer
{
   struct pb_buffer base;       /* Must be first: code casts pb_buffer <-> vmw_gmr_buffer. */

   struct vmw_gmr_bufmgr *mgr;  /* Manager that created this buffer. */

   struct vmw_region *region;   /* Kernel GMR region backing the storage. */
   void *map;                   /* Current CPU mapping, or NULL when unmapped. */
   unsigned map_flags;          /* PB_USAGE_* flags; read by vmw_gmr_buffer_unmap. */
   unsigned map_count;          /* Nesting count of outstanding maps. */
};
68 
69 
70 extern const struct pb_vtbl vmw_gmr_buffer_vtbl;
71 
72 
73 static inline struct vmw_gmr_buffer *
vmw_gmr_buffer(struct pb_buffer * buf)74 vmw_gmr_buffer(struct pb_buffer *buf)
75 {
76    assert(buf);
77    assert(buf->vtbl == &vmw_gmr_buffer_vtbl);
78    return (struct vmw_gmr_buffer *)buf;
79 }
80 
81 
/**
 * Buffer manager that hands out GMR-backed pb_buffers.
 */
struct vmw_gmr_bufmgr
{
   struct pb_manager base;        /* Must be first: code casts pb_manager <-> vmw_gmr_bufmgr. */

   struct vmw_winsys_screen *vws; /* Winsys screen used for region ioctls. */
};
88 
89 
90 static inline struct vmw_gmr_bufmgr *
vmw_gmr_bufmgr(struct pb_manager * mgr)91 vmw_gmr_bufmgr(struct pb_manager *mgr)
92 {
93    assert(mgr);
94 
95    /* Make sure our extra flags don't collide with pipebuffer's flags */
96    STATIC_ASSERT((VMW_BUFFER_USAGE_SHARED & PB_USAGE_ALL) == 0);
97    STATIC_ASSERT((VMW_BUFFER_USAGE_SYNC & PB_USAGE_ALL) == 0);
98 
99    return (struct vmw_gmr_bufmgr *)mgr;
100 }
101 
102 
/**
 * pb_vtbl destroy callback: drop any lingering CPU mapping, destroy the
 * backing GMR region and free the wrapper struct.
 */
static void
vmw_gmr_buffer_destroy(struct pb_buffer *_buf)
{
   struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);

   /* Every map must have been balanced by an unmap before destruction. */
   assert(buf->map_count == 0);
   if (buf->map) {
      /* A mapping may outlive map_count==0 only when the winsys caches maps. */
      assert(buf->mgr->vws->cache_maps);
      vmw_ioctl_region_unmap(buf->region);
   }

   /* NOTE(review): regions adopted from desc->region in
    * vmw_gmr_bufmgr_create_buffer are destroyed here too — presumably
    * ownership transfers to the buffer on creation; confirm with callers. */
   vmw_ioctl_region_destroy(buf->region);

   FREE(buf);
}
118 
119 
120 static void *
vmw_gmr_buffer_map(struct pb_buffer * _buf,enum pb_usage_flags flags,void * flush_ctx)121 vmw_gmr_buffer_map(struct pb_buffer *_buf,
122                    enum pb_usage_flags flags,
123                    void *flush_ctx)
124 {
125    struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
126    int ret;
127 
128    if (!buf->map)
129       buf->map = vmw_ioctl_region_map(buf->region);
130 
131    if (!buf->map)
132       return NULL;
133 
134    if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
135        !(flags & PB_USAGE_UNSYNCHRONIZED)) {
136       ret = vmw_ioctl_syncforcpu(buf->region,
137                                  !!(flags & PB_USAGE_DONTBLOCK),
138                                  !(flags & PB_USAGE_CPU_WRITE),
139                                  FALSE);
140       if (ret)
141          return NULL;
142    }
143 
144    buf->map_count++;
145    return buf->map;
146 }
147 
148 
/**
 * pb_vtbl unmap callback: release CPU synchronization taken at map time
 * and drop the mapping when the map count reaches zero (unless the winsys
 * caches maps, in which case the mapping is kept until destroy).
 */
static void
vmw_gmr_buffer_unmap(struct pb_buffer *_buf)
{
   struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
   /* NOTE(review): map_flags is never assigned anywhere in this file, so
    * CALLOC leaves it zero and this release path may not mirror the flags
    * actually used at map time — verify against vmw_gmr_buffer_map. */
   enum pb_usage_flags flags = buf->map_flags;

   if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      vmw_ioctl_releasefromcpu(buf->region,
                               !(flags & PB_USAGE_CPU_WRITE),
                               FALSE);
   }

   assert(buf->map_count > 0);
   if (!--buf->map_count && !buf->mgr->vws->cache_maps) {
      vmw_ioctl_region_unmap(buf->region);
      buf->map = NULL;
   }
}
168 
169 
170 static void
vmw_gmr_buffer_get_base_buffer(struct pb_buffer * buf,struct pb_buffer ** base_buf,pb_size * offset)171 vmw_gmr_buffer_get_base_buffer(struct pb_buffer *buf,
172                            struct pb_buffer **base_buf,
173                            pb_size *offset)
174 {
175    *base_buf = buf;
176    *offset = 0;
177 }
178 
179 
180 static enum pipe_error
vmw_gmr_buffer_validate(struct pb_buffer * _buf,struct pb_validate * vl,enum pb_usage_flags flags)181 vmw_gmr_buffer_validate( struct pb_buffer *_buf,
182                          struct pb_validate *vl,
183                          enum pb_usage_flags flags )
184 {
185    /* Always pinned */
186    return PIPE_OK;
187 }
188 
189 
/**
 * pb_vtbl fence callback.
 *
 * Intentionally a no-op: the pipebuffer library itself delays destruction
 * of fenced buffers, so nothing extra is required here.
 */
static void
vmw_gmr_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   (void) _buf;
   (void) fence;
}
197 
198 
199 const struct pb_vtbl vmw_gmr_buffer_vtbl = {
200    vmw_gmr_buffer_destroy,
201    vmw_gmr_buffer_map,
202    vmw_gmr_buffer_unmap,
203    vmw_gmr_buffer_validate,
204    vmw_gmr_buffer_fence,
205    vmw_gmr_buffer_get_base_buffer
206 };
207 
208 
209 static struct pb_buffer *
vmw_gmr_bufmgr_create_buffer(struct pb_manager * _mgr,pb_size size,const struct pb_desc * pb_desc)210 vmw_gmr_bufmgr_create_buffer(struct pb_manager *_mgr,
211                          pb_size size,
212                          const struct pb_desc *pb_desc)
213 {
214    struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
215    struct vmw_winsys_screen *vws = mgr->vws;
216    struct vmw_gmr_buffer *buf;
217    const struct vmw_buffer_desc *desc =
218       (const struct vmw_buffer_desc *) pb_desc;
219 
220    buf = CALLOC_STRUCT(vmw_gmr_buffer);
221    if(!buf)
222       goto error1;
223 
224    pipe_reference_init(&buf->base.reference, 1);
225    buf->base.alignment = pb_desc->alignment;
226    buf->base.usage = pb_desc->usage & ~VMW_BUFFER_USAGE_SHARED;
227    buf->base.vtbl = &vmw_gmr_buffer_vtbl;
228    buf->mgr = mgr;
229    buf->base.size = size;
230    if ((pb_desc->usage & VMW_BUFFER_USAGE_SHARED) && desc->region) {
231       buf->region = desc->region;
232    } else {
233       buf->region = vmw_ioctl_region_create(vws, size);
234       if(!buf->region)
235 	 goto error2;
236    }
237 
238    return &buf->base;
239 error2:
240    FREE(buf);
241 error1:
242    return NULL;
243 }
244 
245 
/**
 * pb_manager flush callback — intentionally empty; GMR buffers need no
 * manager-level flushing.
 */
static void
vmw_gmr_bufmgr_flush(struct pb_manager *mgr)
{
   (void) mgr;
}
251 
252 
/**
 * pb_manager destroy callback: free the manager itself.  Buffers it
 * created hold their own resources and are destroyed individually.
 */
static void
vmw_gmr_bufmgr_destroy(struct pb_manager *_mgr)
{
   FREE(vmw_gmr_bufmgr(_mgr));
}
259 
260 
261 struct pb_manager *
vmw_gmr_bufmgr_create(struct vmw_winsys_screen * vws)262 vmw_gmr_bufmgr_create(struct vmw_winsys_screen *vws)
263 {
264    struct vmw_gmr_bufmgr *mgr;
265 
266    mgr = CALLOC_STRUCT(vmw_gmr_bufmgr);
267    if(!mgr)
268       return NULL;
269 
270    mgr->base.destroy = vmw_gmr_bufmgr_destroy;
271    mgr->base.create_buffer = vmw_gmr_bufmgr_create_buffer;
272    mgr->base.flush = vmw_gmr_bufmgr_flush;
273 
274    mgr->vws = vws;
275 
276    return &mgr->base;
277 }
278 
279 
280 boolean
vmw_gmr_bufmgr_region_ptr(struct pb_buffer * buf,struct SVGAGuestPtr * ptr)281 vmw_gmr_bufmgr_region_ptr(struct pb_buffer *buf,
282                           struct SVGAGuestPtr *ptr)
283 {
284    struct pb_buffer *base_buf;
285    pb_size offset = 0;
286    struct vmw_gmr_buffer *gmr_buf;
287 
288    pb_get_base_buffer( buf, &base_buf, &offset );
289 
290    gmr_buf = vmw_gmr_buffer(base_buf);
291    if(!gmr_buf)
292       return FALSE;
293 
294    *ptr = vmw_ioctl_region_ptr(gmr_buf->region);
295 
296    ptr->offset += offset;
297 
298    return TRUE;
299 }
300 
301 #ifdef DEBUG
/**
 * DEBUG-build wrapper pairing a pb_buffer with debug-flush tracking state.
 */
struct svga_winsys_buffer {
   struct pb_buffer *pb_buf;      /* Underlying pipebuffer buffer. */
   struct debug_flush_buf *fbuf;  /* Map/flush debugging record. */
};
306 
307 struct pb_buffer *
vmw_pb_buffer(struct svga_winsys_buffer * buffer)308 vmw_pb_buffer(struct svga_winsys_buffer *buffer)
309 {
310    assert(buffer);
311    return buffer->pb_buf;
312 }
313 
314 struct svga_winsys_buffer *
vmw_svga_winsys_buffer_wrap(struct pb_buffer * buffer)315 vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
316 {
317    struct svga_winsys_buffer *buf;
318 
319    if (!buffer)
320       return NULL;
321 
322    buf = CALLOC_STRUCT(svga_winsys_buffer);
323    if (!buf) {
324       pb_reference(&buffer, NULL);
325       return NULL;
326    }
327 
328    buf->pb_buf = buffer;
329    buf->fbuf = debug_flush_buf_create(FALSE, VMW_DEBUG_FLUSH_STACK);
330    return buf;
331 }
332 
333 struct debug_flush_buf *
vmw_debug_flush_buf(struct svga_winsys_buffer * buffer)334 vmw_debug_flush_buf(struct svga_winsys_buffer *buffer)
335 {
336    return buffer->fbuf;
337 }
338 
339 #endif
340 
/**
 * Destroy a winsys buffer: drop the pb_buffer reference and, in DEBUG
 * builds, the debug-flush record plus the wrapper struct itself.
 *
 * NOTE(review): in non-DEBUG builds nothing is freed here beyond the
 * pb_buffer reference — presumably svga_winsys_buffer is then the
 * pb_buffer itself rather than a wrapper; confirm in vmw_buffer.h.
 */
void
vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
                               struct svga_winsys_buffer *buf)
{
   struct pb_buffer *pbuf = vmw_pb_buffer(buf);
   (void)sws;
   pb_reference(&pbuf, NULL);
#ifdef DEBUG
   debug_flush_buf_reference(&buf->fbuf, NULL);
   FREE(buf);
#endif
}
353 
/**
 * Map a winsys buffer for CPU access.
 *
 * Passes PIPE_MAP_* flags straight through to pb_map as PB_USAGE_* flags;
 * the STATIC_ASSERTs below prove the relevant bits are numerically equal,
 * so the pass-through is safe.  In DEBUG builds the map is also recorded
 * for flush debugging.  Returns the CPU pointer or NULL on failure.
 */
void *
vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                           struct svga_winsys_buffer *buf,
                           enum pipe_map_flags flags)
{
   void *map;

   (void)sws;
   /* An unsynchronized map can never block, so DONTBLOCK is redundant
    * and is cleared to simplify the flag combination passed down. */
   if (flags & PIPE_MAP_UNSYNCHRONIZED)
      flags &= ~PIPE_MAP_DONTBLOCK;

   /* NOTE: we're passing PIPE_MAP_x flags instead of
    * PB_USAGE_x flags here.  We should probably fix that.
    */
   STATIC_ASSERT((unsigned) PB_USAGE_CPU_READ ==
                 (unsigned) PIPE_MAP_READ);
   STATIC_ASSERT((unsigned) PB_USAGE_CPU_WRITE ==
                 (unsigned) PIPE_MAP_WRITE);
   STATIC_ASSERT((unsigned) PB_USAGE_GPU_READ ==
                 (unsigned) PIPE_MAP_DIRECTLY);
   STATIC_ASSERT((unsigned) PB_USAGE_DONTBLOCK ==
                 (unsigned) PIPE_MAP_DONTBLOCK);
   STATIC_ASSERT((unsigned) PB_USAGE_UNSYNCHRONIZED ==
                 (unsigned) PIPE_MAP_UNSYNCHRONIZED);
   STATIC_ASSERT((unsigned) PB_USAGE_PERSISTENT ==
                 (unsigned) PIPE_MAP_PERSISTENT);

   map = pb_map(vmw_pb_buffer(buf), flags & PB_USAGE_ALL, NULL);

#ifdef DEBUG
   /* Only record successful maps, to keep map/unmap accounting balanced. */
   if (map != NULL)
      debug_flush_map(buf->fbuf, flags);
#endif

   return map;
}
390 
391 
/**
 * Unmap a winsys buffer previously mapped with
 * vmw_svga_winsys_buffer_map.  In DEBUG builds the unmap is recorded
 * before the actual pb_unmap so the flush-debug state stays consistent.
 */
void
vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
                             struct svga_winsys_buffer *buf)
{
   (void)sws;

#ifdef DEBUG
   debug_flush_unmap(buf->fbuf);
#endif

   pb_unmap(vmw_pb_buffer(buf));
}
404