/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

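/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command
 */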
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};


/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing,
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create(struct svga_context *svga,
                          unsigned alignment,
                          unsigned usage,
                          unsigned size)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = SVGA_TRY_PTR(sws->buffer_create(sws, alignment, usage, size));
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_retry_enter(svga);
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
      svga_retry_exit(svga);
   }

   return buf;
}
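
/*
 * Illustrative call pattern (a sketch; the alignment/usage values and the
 * error handling shown here are hypothetical, not taken from an actual
 * call site):
 *
 *    struct svga_winsys_buffer *hwbuf =
 *       svga_winsys_buffer_create(svga, 16, 0, size);
 *    if (!hwbuf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;   // GMR exhausted even after flush
 */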


/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * and this is therefore a no-op.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(sbuf->map.count == 0);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}


/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf,
                              unsigned bind_flags)
{
   assert(!sbuf->user);

   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}


/**
 * Allocate graphics memory for vertex/index/constant/etc. buffers (not
 * textures).
 */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf,
                                unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;

   assert(!sbuf->user);

   if (!sbuf->handle) {
      boolean validated;

      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;

      if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;

      if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (bind_flags & PIPE_BIND_COMMAND_ARGS_BUFFER) {
         assert(ss->sws->have_sm5);
         sbuf->key.flags |= SVGA3D_SURFACE_DRAWINDIRECT_ARGS;
      }

      if (!bind_flags && sbuf->b.usage == PIPE_USAGE_STAGING) {
         /* This surface is to be used with the
          * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
          * bind flags are allowed to be set for this surface.
          */
         sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
      }

      if (sbuf->b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
         /* This surface can be mapped persistently.  We use coherent
          * memory to avoid implementing memory barriers for persistent
          * non-coherent memory for now.
          */
         sbuf->key.coherent = 1;
      }

      sbuf->key.size.width = sbuf->b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;
      sbuf->key.arraySize = 1;
      sbuf->key.sampleCount = 0;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->b.width0);

      sbuf->handle = svga_screen_surface_create(ss, bind_flags,
                                                sbuf->b.usage,
                                                &validated, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag the first time the buffer is written
       * to, as svga_screen_surface_create might have returned a recycled
       * host buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.width0);

      /* Add the new surface to the buffer surface list */
      ret = svga_buffer_add_host_surface(sbuf, sbuf->handle, &sbuf->key,
                                         bind_flags);

      if (ss->sws->have_gb_objects) {
         /* Initialize the surface with zeroes */
         ss->sws->surface_init(ss->sws, sbuf->handle,
                               svga_surface_size(&sbuf->key),
                               sbuf->key.flags);
      }
   }

   return ret;
}


/**
 * Recreates a host surface with the new bind flags.
 */
enum pipe_error
svga_buffer_recreate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;
   struct svga_winsys_surface *old_handle = sbuf->handle;

   assert(sbuf->bind_flags != bind_flags);
   assert(old_handle);

   sbuf->handle = NULL;

   /* Create a new resource with the requested bind_flags */
   ret = svga_buffer_create_host_surface(svga_screen(svga->pipe.screen),
                                         sbuf, bind_flags);
   if (ret == PIPE_OK) {
      /* Copy the surface data */
      assert(sbuf->handle);
      SVGA_RETRY(svga, SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle,
                                                sbuf->handle,
                                                0, 0, sbuf->b.width0));
   }

   /* Set the new bind flags for this buffer resource */
   sbuf->bind_flags = bind_flags;

   return ret;
}


/**
 * Returns TRUE if the surface bind flags are compatible with the new bind
 * flags.
 */
static boolean
compatible_bind_flags(unsigned bind_flags,
                      unsigned tobind_flags)
{
   if ((bind_flags & tobind_flags) == tobind_flags)
      return TRUE;
   else if ((bind_flags|tobind_flags) & PIPE_BIND_CONSTANT_BUFFER)
      return FALSE;
   else
      return TRUE;
}
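
/*
 * Examples of the rule above (existing flags -> requested flags):
 *    VERTEX_BUFFER   -> VERTEX_BUFFER|INDEX_BUFFER : compatible (promote)
 *    CONSTANT_BUFFER -> CONSTANT_BUFFER            : compatible (subset)
 *    STREAM_OUTPUT   -> CONSTANT_BUFFER            : incompatible; a
 *                                                    separate surface is
 *                                                    needed
 */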


/**
 * Returns a buffer surface from the surface list that has the requested
 * bind flags, or whose existing bind flags can be promoted to include the
 * new bind flags.
 */
static struct svga_buffer_surface *
svga_buffer_get_host_surface(struct svga_buffer *sbuf,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   LIST_FOR_EACH_ENTRY(bufsurf, &sbuf->surfaces, list) {
      if (compatible_bind_flags(bufsurf->bind_flags, bind_flags))
         return bufsurf;
   }
   return NULL;
}


/**
 * Adds the host surface to the buffer surface list.
 */
enum pipe_error
svga_buffer_add_host_surface(struct svga_buffer *sbuf,
                             struct svga_winsys_surface *handle,
                             struct svga_host_surface_cache_key *key,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   bufsurf = CALLOC_STRUCT(svga_buffer_surface);
   if (!bufsurf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   bufsurf->bind_flags = bind_flags;
   bufsurf->handle = handle;
   bufsurf->key = *key;

   /* Add the surface to the surface list */
   list_add(&bufsurf->list, &sbuf->surfaces);

   /* Set the new bind flags for this buffer resource */
   sbuf->bind_flags = bind_flags;

   return PIPE_OK;
}


/**
 * Start using the specified surface for this buffer resource.
 */
void
svga_buffer_bind_host_surface(struct svga_context *svga,
                              struct svga_buffer *sbuf,
                              struct svga_buffer_surface *bufsurf)
{
   /* Update the to-bind surface */
   assert(bufsurf->handle);
   assert(sbuf->handle);

   /* If we are switching from a stream output buffer to another buffer
    * type, make sure to copy the buffer content.
    */
   if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) {
      SVGA_RETRY(svga, SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle,
                                                bufsurf->handle,
                                                0, 0, sbuf->b.width0));
   }

   /* Set this surface as the current one */
   sbuf->handle = bufsurf->handle;
   sbuf->key = bufsurf->key;
   sbuf->bind_flags = bufsurf->bind_flags;
}


/**
 * Prepare a host surface that can be used as indicated in the
 * tobind_flags.  If the existing host surface was not created with the
 * necessary bind flags, and the new bind flags can be combined with the
 * existing ones, then we recreate the surface with the combined bind
 * flags.  Otherwise, we create a separate surface for the incompatible
 * bind flags.
 * For example, if a stream output buffer is reused as a constant buffer,
 * since a constant buffer surface cannot be bound as a stream output
 * surface, two surfaces will be created, one for stream output and
 * another for the constant buffer.
 */
enum pipe_error
svga_buffer_validate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned tobind_flags)
{
   struct svga_buffer_surface *bufsurf;
   enum pipe_error ret = PIPE_OK;

   /* Flush any pending upload first */
   svga_buffer_upload_flush(svga, sbuf);

   /* First check the cached buffer surface list to see if there is
    * already a buffer surface that has the requested bind flags, or a
    * surface with compatible bind flags that can be promoted.
    */
   bufsurf = svga_buffer_get_host_surface(sbuf, tobind_flags);

   if (bufsurf) {
      if ((bufsurf->bind_flags & tobind_flags) == tobind_flags) {
         /* There is a surface with the requested bind flags */
         svga_buffer_bind_host_surface(svga, sbuf, bufsurf);
      } else {
         /* Recreate a host surface with the combined bind flags */
         ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                                 bufsurf->bind_flags |
                                                 tobind_flags);

         /* Destroy the old surface */
         svga_screen_surface_destroy(svga_screen(sbuf->b.screen),
                                     &bufsurf->key, &bufsurf->handle);

         list_del(&bufsurf->list);
         FREE(bufsurf);
      }
   } else {
      /* Need to create a new surface if the bind flags are incompatible,
       * such as constant buffer surface & stream output surface.
       */
      ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                              tobind_flags);
   }
   return ret;
}


void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   struct svga_buffer_surface *bufsurf, *next;

   LIST_FOR_EACH_ENTRY_SAFE(bufsurf, next, &sbuf->surfaces, list) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               bufsurf->handle, sbuf->b.width0);
      svga_screen_surface_destroy(ss, &bufsurf->key, &bufsurf->handle);
      FREE(bufsurf);
   }
}


/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   const uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

   if (swc->force_coherent || sbuf->key.coherent)
      return PIPE_OK;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(invalidate_cmd,
                           struct svga_3d_invalidate_gb_image, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
                              sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_cmd is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* Initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_cmd is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd,
                                      struct svga_3d_update_gb_image, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b);
   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}


/**
 * Issue DMA commands to transfer guest memory to the host.
 * Note that the memory segments (offset, size) will be patched in
 * later in the svga_buffer_upload_flush() function.
 */
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   const uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   assert(!svga_have_gb_objects(svga));

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes +
                            sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t *)cmd + sizeof *cmd +
                                           numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_buffer_uploads++;

   return PIPE_OK;
}


/**
 * Issue commands to transfer guest memory to the host.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
   if (svga_have_gb_objects(svga)) {
      return svga_buffer_upload_gb_command(svga, sbuf);
   } else {
      return svga_buffer_upload_hb_command(svga, sbuf);
   }
}


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending || svga->swc->force_coherent ||
       sbuf->key.coherent) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;

      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.width0);
         assert(box->x + box->w <= sbuf->b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.width0);
         assert(box->x + box->w <= sbuf->b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   list_del(&sbuf->head);  /* remove from svga->dirty_buffers list */
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b;
   pipe_resource_reference(&dummy, NULL);
}


/**
 * Note a dirty range.
 *
 * This function only records the range.  It doesn't actually emit a DMA
 * upload command; that only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const int left_dist = start - sbuf->map.ranges[i].end;
      const int right_dist = sbuf->map.ranges[i].start - end;
      const int dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and
          * return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_MAP_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the
       * buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}
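
/*
 * Worked example of the coalescing above: with one recorded range
 * [64, 128), a write of [128, 192) has dist == 0, so the range is
 * extended to [64, 192).  A write of [512, 576) is discontiguous; it is
 * recorded as a new range while there is room, or folded into the
 * nearest range once SVGA_BUFFER_MAX_RANGES is reached.
 */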


/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
                      unsigned bind_flags)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;
      unsigned i;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.screen), sbuf,
                                          bind_flags);
      if (ret != PIPE_OK)
         return ret;

      mtx_lock(&ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_MAP_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         mtx_unlock(&ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      /* Copy data from malloc'd swbuf to the new hardware buffer */
      for (i = 0; i < sbuf->map.num_ranges; i++) {
         unsigned start = sbuf->map.ranges[i].start;
         unsigned len = sbuf->map.ranges[i].end - start;
         memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start,
                len);
      }

      if (svga->swc->force_coherent || sbuf->key.coherent)
         sbuf->map.num_ranges = 0;

      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a GPU buffer */
      assert(sbuf->map.count == 0);
      if (sbuf->map.count == 0) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      mtx_unlock(&ss->swc_mutex);
   }

   return PIPE_OK;
}


/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;

         if (offset + size > range->end)
            size = range->end - offset;

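         /* Allocate a GMR buffer for this piece; if the allocation fails,
          * halve the transfer size until it fits, or give up when the
          * size reaches zero.
          */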
         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_MAP_WRITE |
                               PIPE_MAP_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         SVGA_RETRY(svga, SVGA3D_BufferDMA(svga->swc,
                                           hwbuf, sbuf->handle,
                                           SVGA3D_WRITE_HOST_VRAM,
                                           size, 0, offset, sbuf->dma.flags));
         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}


/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in FIFO commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage.  In the non-GB case, the hardware storage will be
 * created if there are mapped ranges and the data is currently in a
 * malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
                   unsigned tobind_flags)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (sbuf->handle) {
      if ((sbuf->bind_flags & tobind_flags) != tobind_flags) {
         /* If the allocated resource's bind flags do not include the
          * requested bind flags, validate the host surface.
          */
         ret = svga_buffer_validate_host_surface(svga, sbuf, tobind_flags);
         if (ret != PIPE_OK)
            return NULL;
      }
   } else {
      /* If there is no resource handle yet, then combine the buffer bind
       * flags and the tobind_flags if they are compatible.
       * If not, just use the tobind_flags for creating the resource handle.
       */
      if (compatible_bind_flags(sbuf->bind_flags, tobind_flags))
         sbuf->bind_flags = sbuf->bind_flags | tobind_flags;
      else
         sbuf->bind_flags = tobind_flags;

      assert((sbuf->bind_flags & tobind_flags) == tobind_flags);

      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);
   if (svga->swc->force_coherent || sbuf->key.coherent)
      return sbuf->handle;

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /* No pending DMA/update commands yet. */

         /* Migrate the data from swbuf -> hwbuf if necessary */
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
         if (ret == PIPE_OK) {
            /* Emit DMA or UpdateGBImage commands */
            SVGA_RETRY_OOM(svga, ret, svga_buffer_upload_command(svga, sbuf));
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               list_addtail(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above.  There is very little
             * that we can do other than proceeding while ignoring the
             * dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already.  Make sure it is from this
          * context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);

   return sbuf->handle;
}
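
/*
 * Illustrative use at state-validation time (a hypothetical call site;
 * the vb->buffer.resource field name is an assumption for this sketch):
 *
 *    struct svga_winsys_surface *handle =
 *       svga_buffer_handle(svga, vb->buffer.resource,
 *                          PIPE_BIND_VERTEX_BUFFER);
 *    if (!handle)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */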


void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
}