/**********************************************************
 * Copyright 2009-2023 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

/**
 * @file
 * SVGA buffer manager for DMA buffers.
 *
 * DMA buffers are used for pixel and vertex data upload/download to/from
 * the virtual SVGA hardware.
 *
 * This file implements the pipebuffer library's buffer manager interface,
 * so that we can use pipebuffer's suballocation, fencing, and debugging
 * facilities with DMA buffers.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "svga_cmd.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "svga_winsys.h"

#include "vmw_screen.h"
#include "vmw_buffer.h"

struct vmw_dma_bufmgr;


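/**
 * A DMA buffer: a pb_buffer backed by a kernel memory region.
 *
 * The CPU mapping, if any, is cached in @map.  @map_count tracks nested
 * maps, and @map_flags records the usage flags of the most recent map so
 * that vmw_dma_buffer_unmap() can undo any CPU synchronization.
 */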
struct vmw_dma_buffer
{
   struct pb_buffer base;

   struct vmw_dma_bufmgr *mgr;

   struct vmw_region *region;
   void *map;
   unsigned map_flags;
   unsigned map_count;
};


extern const struct pb_vtbl vmw_dma_buffer_vtbl;


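/** Downcast a pb_buffer to the vmw_dma_buffer containing it. */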
static inline struct vmw_dma_buffer *
vmw_pb_to_dma_buffer(struct pb_buffer *buf)
{
   assert(buf);
   assert(buf->vtbl == &vmw_dma_buffer_vtbl);
   return container_of(buf, struct vmw_dma_buffer, base);
}


struct vmw_dma_bufmgr
{
   struct pb_manager base;

   struct vmw_winsys_screen *vws;
};


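/** Downcast a pb_manager to the vmw_dma_bufmgr containing it. */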
static inline struct vmw_dma_bufmgr *
vmw_pb_to_dma_bufmgr(struct pb_manager *mgr)
{
   assert(mgr);

   /* Make sure our extra flags don't collide with pipebuffer's flags */
   STATIC_ASSERT((VMW_BUFFER_USAGE_SHARED & PB_USAGE_ALL) == 0);
   STATIC_ASSERT((VMW_BUFFER_USAGE_SYNC & PB_USAGE_ALL) == 0);

   return container_of(mgr, struct vmw_dma_bufmgr, base);
}


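/*
 * Destroy a DMA buffer: drop any cached CPU mapping, destroy the kernel
 * region, and free the wrapper.  A cached mapping may legitimately still
 * exist here, but only when the screen caches maps (vws->cache_maps).
 */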
static void
vmw_dma_buffer_destroy(void *winsys, struct pb_buffer *_buf)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);

   assert(buf->map_count == 0);
   if (buf->map) {
      assert(buf->mgr->vws->cache_maps);
      vmw_ioctl_region_unmap(buf->region);
   }

   vmw_ioctl_region_destroy(buf->region);

   FREE(buf);
}


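/*
 * Map a DMA buffer, creating the kernel mapping lazily on first use.
 * For buffers marked VMW_BUFFER_USAGE_SYNC, a synchronized map also
 * waits (or fails immediately, with PB_USAGE_DONTBLOCK) until the GPU
 * is done with the region.
 */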
static void *
vmw_dma_buffer_map(struct pb_buffer *_buf,
                   enum pb_usage_flags flags,
                   void *flush_ctx)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);
   int ret;

   if (!buf->map)
      buf->map = vmw_ioctl_region_map(buf->region);

   if (!buf->map)
      return NULL;

   if ((_buf->base.usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      ret = vmw_ioctl_syncforcpu(buf->region,
                                 !!(flags & PB_USAGE_DONTBLOCK),
                                 !(flags & PB_USAGE_CPU_WRITE),
                                 false);
      if (ret)
         return NULL;
   }

   /* Record the flags so that vmw_dma_buffer_unmap() can undo the
    * CPU synchronization taken above.
    */
   buf->map_flags = flags;

   buf->map_count++;
   return buf->map;
}


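/*
 * Unmap a DMA buffer.  This releases any CPU synchronization taken in
 * vmw_dma_buffer_map() and tears down the kernel mapping on the last
 * unmap, unless the screen caches maps (vws->cache_maps).
 */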
static void
vmw_dma_buffer_unmap(struct pb_buffer *_buf)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);
   enum pb_usage_flags flags = buf->map_flags;

   if ((_buf->base.usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      vmw_ioctl_releasefromcpu(buf->region,
                               !(flags & PB_USAGE_CPU_WRITE),
                               false);
   }

   assert(buf->map_count > 0);
   if (!--buf->map_count && !buf->mgr->vws->cache_maps) {
      vmw_ioctl_region_unmap(buf->region);
      buf->map = NULL;
   }
}


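/*
 * DMA buffers are not suballocated by this manager, so every buffer is
 * its own base buffer at offset zero.
 */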
static void
vmw_dma_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   *base_buf = buf;
   *offset = 0;
}


static enum pipe_error
vmw_dma_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        enum pb_usage_flags flags)
{
   /* Always pinned */
   return PIPE_OK;
}


static void
vmw_dma_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   /* We don't need to do anything, as the pipebuffer library
    * will take care of delaying the destruction of fenced buffers.
    */
}


const struct pb_vtbl vmw_dma_buffer_vtbl = {
   .destroy = vmw_dma_buffer_destroy,
   .map = vmw_dma_buffer_map,
   .unmap = vmw_dma_buffer_unmap,
   .validate = vmw_dma_buffer_validate,
   .fence = vmw_dma_buffer_fence,
   .get_base_buffer = vmw_dma_buffer_get_base_buffer
};


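/*
 * Create a new DMA buffer.  For shared buffers, the region already
 * exists and is passed in via struct vmw_buffer_desc; otherwise a
 * fresh region is allocated through the kernel interface.
 */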
static struct pb_buffer *
vmw_dma_bufmgr_create_buffer(struct pb_manager *_mgr,
                             pb_size size,
                             const struct pb_desc *pb_desc)
{
   struct vmw_dma_bufmgr *mgr = vmw_pb_to_dma_bufmgr(_mgr);
   struct vmw_winsys_screen *vws = mgr->vws;
   struct vmw_dma_buffer *buf;
   const struct vmw_buffer_desc *desc =
      (const struct vmw_buffer_desc *) pb_desc;

   buf = CALLOC_STRUCT(vmw_dma_buffer);
   if (!buf)
      goto error1;

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment_log2 = util_logbase2(pb_desc->alignment);
   buf->base.base.usage = pb_desc->usage & ~VMW_BUFFER_USAGE_SHARED;
   buf->base.vtbl = &vmw_dma_buffer_vtbl;
   buf->mgr = mgr;
   buf->base.base.size = size;
   if ((pb_desc->usage & VMW_BUFFER_USAGE_SHARED) && desc->region) {
      buf->region = desc->region;
   } else {
      buf->region = vmw_ioctl_region_create(vws, size);
      if (!buf->region)
         goto error2;
   }

   return &buf->base;
error2:
   FREE(buf);
error1:
   return NULL;
}


static void
vmw_dma_bufmgr_flush(struct pb_manager *mgr)
{
   /* No-op */
}


static void
vmw_dma_bufmgr_destroy(struct pb_manager *_mgr)
{
   struct vmw_dma_bufmgr *mgr = vmw_pb_to_dma_bufmgr(_mgr);
   FREE(mgr);
}


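/*
 * Create the DMA buffer manager.  A sketch of the intended use (the
 * actual wrapping is done by the screen setup code, not in this file):
 *
 *    struct pb_manager *dma = vmw_dma_bufmgr_create(vws);
 *    struct pb_manager *fenced = fenced_bufmgr_create(dma, ...);
 *
 * i.e. pipebuffer's generic managers are layered on top, so that
 * suballocation, caching and fencing happen in common code while this
 * manager only creates and destroys kernel regions.
 */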
struct pb_manager *
vmw_dma_bufmgr_create(struct vmw_winsys_screen *vws)
{
   struct vmw_dma_bufmgr *mgr;

   mgr = CALLOC_STRUCT(vmw_dma_bufmgr);
   if (!mgr)
      return NULL;

   mgr->base.destroy = vmw_dma_bufmgr_destroy;
   mgr->base.create_buffer = vmw_dma_bufmgr_create_buffer;
   mgr->base.flush = vmw_dma_bufmgr_flush;

   mgr->vws = vws;

   return &mgr->base;
}


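/*
 * Return the SVGA guest pointer for a (possibly suballocated) buffer,
 * folding the suballocation offset into the returned pointer.
 */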
bool
vmw_dma_bufmgr_region_ptr(struct pb_buffer *buf,
                          struct SVGAGuestPtr *ptr)
{
   struct pb_buffer *base_buf;
   pb_size offset = 0;
   struct vmw_dma_buffer *dma_buf;

   pb_get_base_buffer(buf, &base_buf, &offset);

   dma_buf = vmw_pb_to_dma_buffer(base_buf);
   if (!dma_buf)
      return false;

   *ptr = vmw_ioctl_region_ptr(dma_buf->region);

   ptr->offset += offset;

   return true;
}

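/*
 * In debug builds, an svga_winsys_buffer pairs the pb_buffer with a
 * debug_flush buffer so that map/unmap/flush ordering can be verified.
 * In non-debug builds the wrapper functions are presumably trivial
 * casts defined outside this file, hence the #ifdef DEBUG below.
 */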
#ifdef DEBUG
struct svga_winsys_buffer {
   struct pb_buffer *pb_buf;
   struct debug_flush_buf *fbuf;
};

struct pb_buffer *
vmw_pb_buffer(struct svga_winsys_buffer *buffer)
{
   assert(buffer);
   return buffer->pb_buf;
}

struct svga_winsys_buffer *
vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
{
   struct svga_winsys_buffer *buf;

   if (!buffer)
      return NULL;

   buf = CALLOC_STRUCT(svga_winsys_buffer);
   if (!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   buf->pb_buf = buffer;
   buf->fbuf = debug_flush_buf_create(false, VMW_DEBUG_FLUSH_STACK);
   return buf;
}

struct debug_flush_buf *
vmw_debug_flush_buf(struct svga_winsys_buffer *buffer)
{
   return buffer->fbuf;
}

#endif

void
vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
                               struct svga_winsys_buffer *buf)
{
   struct pb_buffer *pbuf = vmw_pb_buffer(buf);
   (void)sws;
   pb_reference(&pbuf, NULL);
#ifdef DEBUG
   debug_flush_buf_reference(&buf->fbuf, NULL);
   FREE(buf);
#endif
}

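/*
 * Map a winsys buffer, translating pipe_map_flags into the
 * pb_usage_flags that the pipebuffer library understands.  Note that
 * PIPE_MAP_DONTBLOCK is dropped for unsynchronized maps, since an
 * unsynchronized map never needs to wait.
 */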
void *
vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                           struct svga_winsys_buffer *buf,
                           enum pipe_map_flags flags)
{
   void *map;
   enum pb_usage_flags pb_flags = 0;

   (void)sws;
   if (flags & PIPE_MAP_UNSYNCHRONIZED)
      flags &= ~PIPE_MAP_DONTBLOCK;

   if (flags & PIPE_MAP_READ)
      pb_flags |= PB_USAGE_CPU_READ;
   if (flags & PIPE_MAP_WRITE)
      pb_flags |= PB_USAGE_CPU_WRITE;
   if (flags & PIPE_MAP_DIRECTLY)
      pb_flags |= PB_USAGE_GPU_READ;
   if (flags & PIPE_MAP_DONTBLOCK)
      pb_flags |= PB_USAGE_DONTBLOCK;
   if (flags & PIPE_MAP_UNSYNCHRONIZED)
      pb_flags |= PB_USAGE_UNSYNCHRONIZED;
   if (flags & PIPE_MAP_PERSISTENT)
      pb_flags |= PB_USAGE_PERSISTENT;

   map = pb_map(vmw_pb_buffer(buf), pb_flags, NULL);

#ifdef DEBUG
   if (map != NULL)
      debug_flush_map(buf->fbuf, pb_flags);
#endif

   return map;
}


void
vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
                             struct svga_winsys_buffer *buf)
{
   (void)sws;

#ifdef DEBUG
   debug_flush_unmap(buf->fbuf);
#endif

   pb_unmap(vmw_pb_buffer(buf));
}