/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command.
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

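/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command.
 */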
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};

/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing,
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create(struct svga_context *svga,
                          unsigned alignment,
                          unsigned usage,
                          unsigned size)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = SVGA_TRY_PTR(sws->buffer_create(sws, alignment, usage, size));
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_retry_enter(svga);
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
      svga_retry_exit(svga);
   }

   return buf;
}

/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * and this function is therefore a no-op.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(sbuf->map.count == 0);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}

/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf,
                              unsigned bind_flags)
{
   assert(!sbuf->user);

   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

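      /* Allocate the full-size hardware (GMR) buffer up front; the actual
       * contents are transferred later with DMA upload commands.
       */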
      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}

/**
 * Allocate graphics memory for vertex/index/constant/etc. buffers (not
 * textures).
 */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf,
                                unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;

   assert(!sbuf->user);

   if (!sbuf->handle) {
      boolean validated;

      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;

      if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;

      if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (bind_flags & PIPE_BIND_COMMAND_ARGS_BUFFER) {
         assert(ss->sws->have_sm5);
         sbuf->key.flags |= SVGA3D_SURFACE_DRAWINDIRECT_ARGS;
      }

      if (!bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
         /* This surface is to be used with the
          * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
          * bind flags are allowed to be set for this surface.
          */
         sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
      }

      if (sbuf->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
         /* This surface can be mapped persistently. We use
          * coherent memory to avoid implementing memory barriers for
          * persistent non-coherent memory for now.
          */
         sbuf->key.coherent = 1;
      }

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;
      sbuf->key.arraySize = 1;
      sbuf->key.sampleCount = 0;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, bind_flags,
                                                sbuf->b.b.usage,
                                                &validated, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);

      /* Add the new surface to the buffer surface list */
      ret = svga_buffer_add_host_surface(sbuf, sbuf->handle, &sbuf->key,
                                         bind_flags);

      if (ss->sws->have_gb_objects) {
         /* Initialize the surface with zero */
         ss->sws->surface_init(ss->sws, sbuf->handle,
                               svga_surface_size(&sbuf->key),
                               sbuf->key.flags);
      }
   }

   return ret;
}

/**
 * Recreates a host surface with the new bind flags.
 */
enum pipe_error
svga_buffer_recreate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;
   struct svga_winsys_surface *old_handle = sbuf->handle;

   assert(sbuf->bind_flags != bind_flags);
   assert(old_handle);

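   /* Clear the current handle first so svga_buffer_create_host_surface()
    * allocates a fresh surface rather than reusing the existing one.
    */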
   sbuf->handle = NULL;

   /* Create a new resource with the requested bind_flags */
   ret = svga_buffer_create_host_surface(svga_screen(svga->pipe.screen),
                                         sbuf, bind_flags);
   if (ret == PIPE_OK) {
      /* Copy the surface data */
      assert(sbuf->handle);
      SVGA_RETRY(svga, SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle,
                                                sbuf->handle,
                                                0, 0, sbuf->b.b.width0));
   }

   /* Set the new bind flags for this buffer resource */
   sbuf->bind_flags = bind_flags;

   return ret;
}

/**
 * Returns TRUE if the surface bind flags are compatible with the new bind
 * flags.  Bind flags can be combined unless PIPE_BIND_CONSTANT_BUFFER is
 * involved: a constant buffer surface cannot share bindings with any other
 * usage, so it is only compatible when the requested flags are already
 * covered by the existing ones.
 */
static boolean
compatible_bind_flags(unsigned bind_flags,
                      unsigned tobind_flags)
{
   if ((bind_flags & tobind_flags) == tobind_flags)
      return TRUE;
   else if ((bind_flags|tobind_flags) & PIPE_BIND_CONSTANT_BUFFER)
      return FALSE;
   else
      return TRUE;
}

/**
 * Returns a buffer surface from the surface list
 * that has the requested bind flags, or whose existing bind flags
 * can be promoted to include the new bind flags.
 */
static struct svga_buffer_surface *
svga_buffer_get_host_surface(struct svga_buffer *sbuf,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   LIST_FOR_EACH_ENTRY(bufsurf, &sbuf->surfaces, list) {
      if (compatible_bind_flags(bufsurf->bind_flags, bind_flags))
         return bufsurf;
   }
   return NULL;
}

/**
 * Adds the host surface to the buffer surface list.
 */
enum pipe_error
svga_buffer_add_host_surface(struct svga_buffer *sbuf,
                             struct svga_winsys_surface *handle,
                             struct svga_host_surface_cache_key *key,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   bufsurf = CALLOC_STRUCT(svga_buffer_surface);
   if (!bufsurf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   bufsurf->bind_flags = bind_flags;
   bufsurf->handle = handle;
   bufsurf->key = *key;

   /* add the surface to the surface list */
   list_add(&bufsurf->list, &sbuf->surfaces);

   /* Set the new bind flags for this buffer resource */
   sbuf->bind_flags = bind_flags;

   return PIPE_OK;
}

/**
 * Start using the specified surface for this buffer resource.
 */
void
svga_buffer_bind_host_surface(struct svga_context *svga,
                              struct svga_buffer *sbuf,
                              struct svga_buffer_surface *bufsurf)
{
   /* Update the to-bind surface */
   assert(bufsurf->handle);
   assert(sbuf->handle);

   /* If we are switching from stream output to another buffer type,
    * make sure to copy the buffer content.
    */
   if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) {
      SVGA_RETRY(svga, SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle,
                                                bufsurf->handle,
                                                0, 0, sbuf->b.b.width0));
   }

   /* Set this surface as the current one */
   sbuf->handle = bufsurf->handle;
   sbuf->key = bufsurf->key;
   sbuf->bind_flags = bufsurf->bind_flags;
}

/**
 * Prepare a host surface that can be used as indicated in the
 * tobind_flags.  If the existing host surface was not created
 * with the necessary binding flags and the new bind flags can be
 * combined with the existing ones, then we will recreate the
 * surface with the combined bind flags.  Otherwise, we will create
 * a separate surface for the incompatible bind flags.
 * For example, if a stream output buffer is reused as a constant buffer,
 * since a constant buffer surface cannot be bound as a stream output
 * surface, two surfaces will be created, one for stream output,
 * and another one for constant buffer.
 */
enum pipe_error
svga_buffer_validate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned tobind_flags)
{
   struct svga_buffer_surface *bufsurf;
   enum pipe_error ret = PIPE_OK;

   /* Flush any pending upload first */
   svga_buffer_upload_flush(svga, sbuf);

   /* First check from the cached buffer surface list to see if there is
    * already a buffer surface that has the requested bind flags, or a
    * surface with compatible bind flags that can be promoted.
    */
   bufsurf = svga_buffer_get_host_surface(sbuf, tobind_flags);

   if (bufsurf) {
      if ((bufsurf->bind_flags & tobind_flags) == tobind_flags) {
         /* there is a surface with the requested bind flags */
         svga_buffer_bind_host_surface(svga, sbuf, bufsurf);
      } else {
         /* Recreate a host surface with the combined bind flags */
         ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                                 bufsurf->bind_flags |
                                                 tobind_flags);

         /* Destroy the old surface */
         svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
                                     &bufsurf->key, &bufsurf->handle);

         list_del(&bufsurf->list);
         FREE(bufsurf);
      }
   } else {
      /* Need to create a new surface if the bind flags are incompatible,
       * such as constant buffer surface & stream output surface.
       */
      ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                              tobind_flags);
   }
   return ret;
}

void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   struct svga_buffer_surface *bufsurf, *next;

   LIST_FOR_EACH_ENTRY_SAFE(bufsurf, next, &sbuf->surfaces, list) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               bufsurf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &bufsurf->key, &bufsurf->handle);
      FREE(bufsurf);
   }
}

/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   const uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

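   /* Coherent buffers are kept in sync with the host automatically, so no
    * explicit update commands are needed.
    */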
   if (swc->force_coherent || sbuf->key.coherent)
      return PIPE_OK;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
                              sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);
   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}

/**
 * Issue DMA commands to transfer guest memory to the host.
 * Note that the memory segments (offset, size) will be patched in
 * later in the svga_buffer_upload_flush() function.
 */
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   const uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   assert(!svga_have_gb_objects(svga));

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

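   /* Register relocations so the winsys can patch in the actual guest
    * memory region and host surface id when the command buffer is
    * submitted.
    */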
   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t *)cmd + sizeof *cmd +
                                           numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_buffer_uploads++;

   return PIPE_OK;
}

/**
 * Issue commands to transfer guest memory to the host.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
   if (svga_have_gb_objects(svga)) {
      return svga_buffer_upload_gb_command(svga, sbuf);
   } else {
      return svga_buffer_upload_hb_command(svga, sbuf);
   }
}

/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending || svga->swc->force_coherent ||
       sbuf->key.coherent) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;

      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   list_del(&sbuf->head);  /* remove from svga->dirty_buffers list */
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}

/**
 * Note a dirty range.
 *
 * This function only notes the range down.  It doesn't actually emit a DMA
 * upload command.  That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

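   /* Pick a default "nearest" slot: while there is still a free slot, a new
    * range can simply be appended; once all slots are taken, fall back to
    * extending the last range unless a closer one is found below.
    */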
   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const int left_dist = start - sbuf->map.ranges[i].end;
      const int right_dist = sbuf->map.ranges[i].start - end;
      const int dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_MAP_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */
   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */
      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */
      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}

/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
                      unsigned bind_flags)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;
      unsigned i;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf,
                                          bind_flags);
      if (ret != PIPE_OK)
         return ret;

      mtx_lock(&ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_MAP_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         mtx_unlock(&ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      /* Copy data from malloc'd swbuf to the new hardware buffer */
      for (i = 0; i < sbuf->map.num_ranges; i++) {
         unsigned start = sbuf->map.ranges[i].start;
         unsigned len = sbuf->map.ranges[i].end - start;
         memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
      }

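      /* Coherent memory is kept in sync with the host automatically, so the
       * dirty ranges do not need a separate upload and can be dropped.
       */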
      if (svga->swc->force_coherent || sbuf->key.coherent)
         sbuf->map.num_ranges = 0;

      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(sbuf->map.count == 0);
      if (sbuf->map.count == 0) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      mtx_unlock(&ss->swc_mutex);
   }

   return PIPE_OK;
}

/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
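         /* If the allocation fails, keep halving the transfer size until a
          * buffer fits in the GMR aperture, giving up only when the size
          * reaches zero.
          */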
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_MAP_WRITE |
                               PIPE_MAP_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         SVGA_RETRY(svga, SVGA3D_BufferDMA(svga->swc,
                                           hwbuf, sbuf->handle,
                                           SVGA3D_WRITE_HOST_VRAM,
                                           size, 0, offset, sbuf->dma.flags));
         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}

/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage.  In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
                   unsigned tobind_flags)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (sbuf->handle) {
      if ((sbuf->bind_flags & tobind_flags) != tobind_flags) {
         /* If the allocated resource's bind flags do not include the
          * requested bind flags, validate the host surface.
          */
         ret = svga_buffer_validate_host_surface(svga, sbuf, tobind_flags);
         if (ret != PIPE_OK)
            return NULL;
      }
   } else {
      /* If there is no resource handle yet, then combine the buffer bind
       * flags and the tobind_flags if they are compatible.
       * If not, just use the tobind_flags for creating the resource handle.
       */
      if (compatible_bind_flags(sbuf->bind_flags, tobind_flags))
         sbuf->bind_flags = sbuf->bind_flags | tobind_flags;
      else
         sbuf->bind_flags = tobind_flags;

      assert((sbuf->bind_flags & tobind_flags) == tobind_flags);

      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);
   if (svga->swc->force_coherent || sbuf->key.coherent)
      return sbuf->handle;

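   /* If there are dirty ranges, emit the upload commands now so the host
    * surface is up to date before it is referenced.
    */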
   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /* No pending DMA/update commands yet. */

         /* Migrate the data from swbuf -> hwbuf if necessary */
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
         if (ret == PIPE_OK) {
            /* Emit DMA or UpdateGBImage commands */
            SVGA_RETRY_OOM(svga, ret, svga_buffer_upload_command(svga, sbuf));
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               list_addtail(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * in smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above.  There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA.  Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);

   return sbuf->handle;
}

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);

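   /* Walk the dirty-buffers list manually: svga_buffer_upload_flush()
    * unlinks the buffer from the list, so the next node must be fetched
    * before each flush.
    */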
   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
}