• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2008-2024 Broadcom. All Rights Reserved.
3  * The term “Broadcom” refers to Broadcom Inc.
4  * and/or its subsidiaries.
5  * SPDX-License-Identifier: MIT
6  */
7 
8 #include "svga3d_reg.h"
9 #include "vmw_surf_defs.h"
10 
11 #include "include/svga3d_surfacedefs.h"
12 #include "pipe/p_state.h"
13 #include "pipe/p_defines.h"
14 #include "util/u_thread.h"
15 #include "util/format/u_format.h"
16 #include "util/u_inlines.h"
17 #include "util/u_math.h"
18 #include "util/u_memory.h"
19 #include "util/u_resource.h"
20 #include "util/u_upload_mgr.h"
21 
22 #include "svga_cmd.h"
23 #include "svga_format.h"
24 #include "svga_screen.h"
25 #include "svga_context.h"
26 #include "svga_resource_texture.h"
27 #include "svga_resource_buffer.h"
28 #include "svga_sampler_view.h"
29 #include "svga_surface.h"
30 #include "svga_winsys.h"
31 #include "svga_debug.h"
32 
33 
34 static void
svga_transfer_dma_band(struct svga_context * svga,struct svga_transfer * st,SVGA3dTransferType transfer,unsigned x,unsigned y,unsigned z,unsigned w,unsigned h,unsigned d,unsigned srcx,unsigned srcy,unsigned srcz,SVGA3dSurfaceDMAFlags flags)35 svga_transfer_dma_band(struct svga_context *svga,
36                        struct svga_transfer *st,
37                        SVGA3dTransferType transfer,
38                        unsigned x, unsigned y, unsigned z,
39                        unsigned w, unsigned h, unsigned d,
40                        unsigned srcx, unsigned srcy, unsigned srcz,
41                        SVGA3dSurfaceDMAFlags flags)
42 {
43    struct svga_texture *texture = svga_texture(st->base.resource);
44    SVGA3dCopyBox box;
45 
46    assert(!st->use_direct_map);
47 
48    box.x = x;
49    box.y = y;
50    box.z = z;
51    box.w = w;
52    box.h = h;
53    box.d = d;
54    box.srcx = srcx;
55    box.srcy = srcy;
56    box.srcz = srcz;
57 
58    SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
59             "(%u, %u, %u), %ubpp\n",
60             transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
61             texture->handle,
62             st->slice,
63             x,
64             y,
65             z,
66             x + w,
67             y + h,
68             z + 1,
69             util_format_get_blocksize(texture->b.format) * 8 /
70             (util_format_get_blockwidth(texture->b.format)
71              * util_format_get_blockheight(texture->b.format)));
72 
73    SVGA_RETRY(svga, SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags));
74 }
75 
76 
/**
 * Perform a DMA transfer between the transfer's buffer and the host
 * surface, in the direction given by \p transfer.
 *
 * If the whole box fits in the hardware buffer (no st->swbuf), the
 * transfer is done in one SurfaceDMA command.  Otherwise the box is
 * split into horizontal bands of st->hw_nblocksy block rows: each band
 * is staged through the (smaller) hardware buffer and copied to/from
 * the malloc'd software buffer.
 */
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __func__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->box.x, st->box.y, st->box.z,
                             st->box.w, st->box.h, st->box.d,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         /* For readback, wait for the DMA to complete before the caller
          * reads the mapped buffer.
          */
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      /* Band height in pixels (a whole number of block rows) */
      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->box.h; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         /* Clamp the last band to the remaining height */
         if (y + h > st->box.h)
            h = st->box.h - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         /* Byte offset/length of this band within the software buffer */
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_MAP_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
            }

            /* Copy this band from the software buffer into the HW buffer */
            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->box.x, y, st->box.z,
                                st->box.w, h, st->box.d,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents to be discarded on the next band
          * upload.
          */
         flags.discard = false;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            /* Wait for this band's readback, then copy it out to swbuf */
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}
180 
181 
182 
183 bool
svga_resource_get_handle(struct pipe_screen * screen,struct pipe_context * context,struct pipe_resource * texture,struct winsys_handle * whandle,unsigned usage)184 svga_resource_get_handle(struct pipe_screen *screen,
185                          struct pipe_context *context,
186                          struct pipe_resource *texture,
187                          struct winsys_handle *whandle,
188                          unsigned usage)
189 {
190    struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
191    unsigned stride;
192 
193    if (texture->target == PIPE_BUFFER)
194       return false;
195 
196    SVGA_DBG(DEBUG_DMA, "%s: texture=%p cachable=%d\n", __FUNCTION__,
197             texture, svga_texture(texture)->key.cachable);
198 
199    svga_texture(texture)->key.cachable = 0;
200 
201    stride = util_format_get_nblocksx(texture->format, texture->width0) *
202             util_format_get_blocksize(texture->format);
203 
204    return sws->surface_get_handle(sws, svga_texture(texture)->handle,
205                                   stride, whandle);
206 }
207 
208 
209 /**
210  * Determine if we need to read back a texture image before mapping it.
211  */
212 static inline bool
need_tex_readback(struct svga_transfer * st)213 need_tex_readback(struct svga_transfer *st)
214 {
215    if (st->base.usage & PIPE_MAP_READ)
216       return true;
217 
218    if ((st->base.usage & PIPE_MAP_WRITE) &&
219        ((st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) == 0)) {
220       return svga_was_texture_rendered_to(svga_texture(st->base.resource));
221    }
222 
223    return false;
224 }
225 
226 
227 static void
readback_texture_surface(struct svga_context * svga,struct svga_texture * tex,struct svga_winsys_surface * surf)228 readback_texture_surface(struct svga_context *svga,
229                          struct svga_texture *tex,
230                          struct svga_winsys_surface *surf)
231 {
232    SVGA_RETRY(svga, SVGA3D_ReadbackGBSurface(svga->swc, surf));
233 
234    /* Mark the texture surface as UPDATED */
235    tex->surface_state = SVGA_SURFACE_STATE_UPDATED;
236 
237    svga->hud.num_readbacks++;
238    SVGA_STATS_COUNT_INC(svga_sws(svga), SVGA_STATS_COUNT_TEXREADBACK);
239 }
240 
241 /**
242  * Use DMA for the transfer request
243  */
/**
 * Use DMA for the transfer request.
 *
 * Allocates a tightly-packed hardware (DMA) buffer covering the transfer
 * box.  If a buffer of the full size cannot be allocated, the band height
 * (st->hw_nblocksy) is repeatedly halved until an allocation succeeds; in
 * that case a malloc'd software buffer (st->swbuf) holds the complete
 * image and the hardware buffer is used as a per-band staging area.
 *
 * For read mappings, the current texture contents are DMA'd into the
 * buffer before returning.
 *
 * \return pointer the caller can read/write, or NULL on allocation failure.
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->box.w);
   nblocksy = util_format_get_nblocksy(texture->format, st->box.h);
   d = st->box.d;

   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   /* First try a hardware buffer big enough for the whole box */
   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

   /* On failure, halve the band height until an allocation succeeds
    * (or hw_nblocksy reaches zero).
    */
   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __func__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_MAP_READ) {
      /* Bring the current texture contents into the buffer */
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}
308 
309 
310 /**
311  * Use direct map for the transfer request
312  */
/**
 * Use direct map for the transfer request.
 *
 * Maps the guest-backed surface storage directly, with no DMA staging
 * buffer.  When the caller needs current data (see need_tex_readback()),
 * the whole surface is first read back from the host and the context is
 * finished so the data is valid at map time.
 *
 * \return pointer to the requested box within the mapped surface, or
 *         NULL if the map failed.
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy;
   unsigned usage = st->base.usage;

   if (need_tex_readback(st)) {
      svga_surfaces_flush(svga);

      if (!svga->swc->force_coherent || tex->imported) {
         /* Readback the whole surface */
         readback_texture_surface(svga, tex, surf);

         /* Wait for the readback to complete before mapping */
         svga_context_finish(svga);
      }
      /*
       * Note: if PIPE_MAP_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex);
   }
   else {
      assert(usage & PIPE_MAP_WRITE);
      if ((usage & PIPE_MAP_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_level_dirty(tex, st->slice, level)) {
            /*
             * do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      bool retry, rebind;
      unsigned offset, mip_width, mip_height;
      struct svga_winsys_context *swc = svga->swc;

      if (swc->force_coherent) {
         usage |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
      }

      map = SVGA_TRY_MAP(svga->swc->surface_map
                         (svga->swc, surf, usage, &retry, &rebind), retry);

      if (map == NULL && retry) {
         /*
          * At this point, the svga_surfaces_flush() should already have
          * called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_retry_enter(svga);
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry, &rebind);
         svga_retry_exit(svga);
      }
      if (map && rebind) {
         enum pipe_error ret;

         /* The winsys asked us to rebind the surface after mapping */
         ret = SVGA3D_BindGBSurface(swc, surf);
         if (ret != PIPE_OK) {
            /* Command buffer was full; flush and retry once */
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BindGBSurface(swc, surf);
            assert(ret == PIPE_OK);
         }
         svga_context_flush(svga, NULL);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /**
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.width0;
      baseLevelSize.height = tex->b.height0;
      baseLevelSize.depth = tex->b.depth0;

      if ((tex->b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_2D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
         /* For array textures, the layer stride is the size of one array
          * element's complete mip chain.
          */
         st->base.layer_stride =
            vmw_surf_get_image_offset(tex->key.format, baseLevelSize,
                                      tex->b.last_level + 1, 1, 0);
      }

      offset = vmw_surf_get_image_offset(tex->key.format, baseLevelSize,
                                         tex->b.last_level + 1, /* numMips */
                                         st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.width0, level);
      mip_height = u_minify(tex->b.height0, level);

      /* Add the offset of the requested box within the mip image */
      offset += vmw_surf_get_pixel_offset(tex->key.format,
                                          mip_width, mip_height,
                                          st->box.x,
                                          st->box.y,
                                          st->box.z);

      return (void *) (map + offset);
   }
}
449 
450 
451 /**
452  * Request a transfer map to the texture resource
453  */
/**
 * Request a transfer map to the texture resource.
 *
 * Selects one of three mapping strategies:
 *  - DMA buffer staging (non guest-backed path),
 *  - direct map of the guest-backed surface,
 *  - texture upload buffer (write mappings of dirty/rendered-to surfaces,
 *    to avoid a readback).
 *
 * On success, *ptransfer is set and the map pointer is returned;
 * returns NULL on failure.
 */
void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   bool use_direct_map = svga_have_gb_objects(svga) &&
                         (!svga_have_gb_dma(svga) || (usage & PIPE_MAP_WRITE));
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = true;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* The modified transfer map box with the array index removed from z.
    * The array index is specified in slice.
    */
   st->box.x = box->x;
   st->box.y = box->y;
   st->box.z = box->z;
   st->box.w = box->width;
   st->box.h = box->height;
   st->box.d = box->depth;

   switch (tex->b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transfering multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* We never want to use DMA transfers on systems with GBObjects because
    * it causes serialization issues and in SVGAv3 vram is gone which
    * makes it impossible to support both at the same time.
    */
   if (svga_have_gb_objects(svga)) {
      use_direct_map = true;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer and there is no pending primitives, clear
    * the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf) &&
       (svga_have_vgpu10(svga) ||
        !svga_hwtnl_has_pending_prim(svga->hwtnl))) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      bool can_use_upload = tex->can_use_upload &&
                            !(st->base.usage & PIPE_MAP_READ);
      bool was_rendered_to =
         svga_was_texture_rendered_to(svga_texture(texture));
      bool is_dirty = svga_is_texture_dirty(svga_texture(texture));

      /* If the texture was already rendered to or has pending changes and
       * upload buffer is supported, then we will use upload buffer to
       * avoid the need to read back the texture content; otherwise,
       * we'll first try to map directly to the GB surface, if it is blocked,
       * then we'll try the upload buffer.
       */
      if ((was_rendered_to || is_dirty) && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly map to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_MAP_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* if direct map with DONTBLOCK fails, then try upload to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_MAP_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->box.d;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}
613 
614 /**
615  * Unmap a GB texture surface.
616  */
617 static void
svga_texture_surface_unmap(struct svga_context * svga,struct pipe_transfer * transfer)618 svga_texture_surface_unmap(struct svga_context *svga,
619                            struct pipe_transfer *transfer)
620 {
621    struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
622    struct svga_winsys_context *swc = svga->swc;
623    bool rebind;
624 
625    assert(surf);
626 
627    swc->surface_unmap(swc, surf, &rebind);
628    if (rebind) {
629       SVGA_RETRY(svga, SVGA3D_BindGBSurface(swc, surf));
630    }
631 }
632 
633 
/**
 * Issue an UpdateGBImage command for one image (face/slice + mip level)
 * of a guest-backed surface (VGPU9 path).
 */
static void
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   SVGA_RETRY(svga, SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level));
}
643 
644 
645 static void
update_image_vgpu10(struct svga_context * svga,struct svga_winsys_surface * surf,const SVGA3dBox * box,unsigned slice,unsigned level,unsigned numMipLevels)646 update_image_vgpu10(struct svga_context *svga,
647                     struct svga_winsys_surface *surf,
648                     const SVGA3dBox *box,
649                     unsigned slice,
650                     unsigned level,
651                     unsigned numMipLevels)
652 {
653    unsigned subResource;
654 
655    subResource = slice * numMipLevels + level;
656 
657    SVGA_RETRY(svga, SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box,
658                                                     subResource));
659 }
660 
661 
662 /**
663  * unmap DMA transfer request
664  */
/**
 * unmap DMA transfer request
 *
 * Unmaps the hardware staging buffer (when it was mapped directly),
 * uploads written data to the host surface via DMA, then frees both
 * the software and hardware buffers.
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   /* The HW buffer was mapped directly only when no swbuf is in use */
   if (!st->swbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_MAP_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;
      struct pipe_resource *texture = st->base.resource;
      struct svga_texture *tex = svga_texture(texture);


      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         flags.discard = true;
      }
      if (st->base.usage & PIPE_MAP_UNSYNCHRONIZED) {
         flags.unsynchronized = true;
      }

      /* Upload the (possibly banded) data to the host surface */
      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
      svga_set_texture_rendered_to(tex);
   }

   /* FREE(NULL) is a no-op when no software buffer was allocated */
   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}
696 
697 
698 /**
699  * unmap direct map transfer request
700  */
/**
 * unmap direct map transfer request
 *
 * Unmaps the guest-backed surface and, for write mappings, emits update
 * commands so the host picks up the modified region.
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_MAP_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;

      assert(svga_have_gb_objects(svga));

      /* update the effected region */
      SVGA3dBox box = st->box;
      unsigned nlayers;

      switch (tex->b.target) {
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         /* For array textures box.d counted layers, which are updated
          * one at a time below.
          */
         nlayers = box.d;
         box.d = 1;
         break;
      default:
         nlayers = 1;
         break;
      }


      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __func__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      /* Coherent (non-imported) surfaces need no explicit update command */
      if (!svga->swc->force_coherent || tex->imported) {
         if (svga_have_vgpu10(svga)) {
            unsigned i;

            for (i = 0; i < nlayers; i++) {
               update_image_vgpu10(svga, surf, &box,
                                   st->slice + i, transfer->level,
                                   tex->b.last_level + 1);
            }
         } else {
            assert(nlayers == 1);
            update_image_vgpu9(svga, surf, &box, st->slice,
                               transfer->level);
         }
      }

      /* Mark the texture surface state as UPDATED */
      tex->surface_state = SVGA_SURFACE_STATE_UPDATED;
   }
}
759 
760 
/**
 * Unmap a texture transfer.
 *
 * Dispatches to the DMA, upload-buffer, or direct-map unmap path that
 * matches how the transfer was mapped, then updates dirty/aging
 * bookkeeping for write mappings and releases the transfer object.
 */
void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   /* Pick the unmap path matching the map strategy chosen earlier */
   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_MAP_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}
800 
801 
802 /**
803  * Does format store depth values?
804  */
805 static inline bool
format_has_depth(enum pipe_format format)806 format_has_depth(enum pipe_format format)
807 {
808    const struct util_format_description *desc = util_format_description(format);
809    return util_format_has_depth(desc);
810 }
811 
812 struct pipe_resource *
svga_texture_create(struct pipe_screen * screen,const struct pipe_resource * template)813 svga_texture_create(struct pipe_screen *screen,
814                     const struct pipe_resource *template)
815 {
816    struct svga_screen *svgascreen = svga_screen(screen);
817    struct svga_texture *tex;
818    unsigned bindings = template->bind;
819 
820    SVGA_STATS_TIME_PUSH(svgascreen->sws,
821                         SVGA_STATS_TIME_CREATETEXTURE);
822 
823    assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
824    if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
825       goto fail_notex;
826    }
827 
828    /* Verify the number of mipmap levels isn't impossibly large.  For example,
829     * if the base 2D image is 16x16, we can't have 8 mipmap levels.
830     * the gallium frontend should never ask us to create a resource with invalid
831     * parameters.
832     */
833    {
834       unsigned max_dim = template->width0;
835 
836       switch (template->target) {
837       case PIPE_TEXTURE_1D:
838       case PIPE_TEXTURE_1D_ARRAY:
839          // nothing
840          break;
841       case PIPE_TEXTURE_2D:
842       case PIPE_TEXTURE_CUBE:
843       case PIPE_TEXTURE_CUBE_ARRAY:
844       case PIPE_TEXTURE_2D_ARRAY:
845          max_dim = MAX2(max_dim, template->height0);
846          break;
847       case PIPE_TEXTURE_3D:
848          max_dim = MAX3(max_dim, template->height0, template->depth0);
849          break;
850       case PIPE_TEXTURE_RECT:
851       case PIPE_BUFFER:
852          assert(template->last_level == 0);
853          /* the assertion below should always pass */
854          break;
855       default:
856          debug_printf("Unexpected texture target type\n");
857       }
858       assert(1 << template->last_level <= max_dim);
859    }
860 
861    tex = CALLOC_STRUCT(svga_texture);
862    if (!tex) {
863       goto fail_notex;
864    }
865 
866    tex->defined = CALLOC(template->depth0 * template->array_size,
867                          sizeof(tex->defined[0]));
868    if (!tex->defined) {
869       FREE(tex);
870       goto fail_notex;
871    }
872 
873    tex->dirty = CALLOC(template->depth0 * template->array_size,
874                              sizeof(tex->dirty[0]));
875    if (!tex->dirty) {
876       goto fail;
877    }
878 
879    tex->b = *template;
880    pipe_reference_init(&tex->b.reference, 1);
881    tex->b.screen = screen;
882 
883    tex->key.flags = 0;
884    tex->key.size.width = template->width0;
885    tex->key.size.height = template->height0;
886    tex->key.size.depth = template->depth0;
887    tex->key.arraySize = 1;
888    tex->key.numFaces = 1;
889 
890    /* nr_samples=1 must be treated as a non-multisample texture */
891    if (tex->b.nr_samples == 1) {
892       tex->b.nr_samples = 0;
893    }
894    else if (tex->b.nr_samples > 1) {
895       assert(svgascreen->sws->have_sm4_1);
896       tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
897    }
898 
899    tex->key.sampleCount = tex->b.nr_samples;
900 
901    if (svgascreen->sws->have_vgpu10) {
902       switch (template->target) {
903       case PIPE_TEXTURE_1D:
904          tex->key.flags |= SVGA3D_SURFACE_1D;
905          break;
906       case PIPE_TEXTURE_1D_ARRAY:
907          tex->key.flags |= SVGA3D_SURFACE_1D;
908          FALLTHROUGH;
909       case PIPE_TEXTURE_2D_ARRAY:
910          tex->key.flags |= SVGA3D_SURFACE_ARRAY;
911          tex->key.arraySize = template->array_size;
912          break;
913       case PIPE_TEXTURE_3D:
914          tex->key.flags |= SVGA3D_SURFACE_VOLUME;
915          break;
916       case PIPE_TEXTURE_CUBE:
917          tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
918          tex->key.numFaces = 6;
919          break;
920       case PIPE_TEXTURE_CUBE_ARRAY:
921          assert(svgascreen->sws->have_sm4_1);
922          tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
923          tex->key.numFaces = 1;  // arraySize already includes the 6 faces
924          tex->key.arraySize = template->array_size;
925          break;
926       default:
927          break;
928       }
929    }
930    else {
931       switch (template->target) {
932       case PIPE_TEXTURE_3D:
933          tex->key.flags |= SVGA3D_SURFACE_VOLUME;
934          break;
935       case PIPE_TEXTURE_CUBE:
936          tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
937          tex->key.numFaces = 6;
938          break;
939       default:
940          break;
941       }
942    }
943 
944    tex->key.cachable = 1;
945 
946    if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
947        !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
948       /* Also check if the format can be sampled from */
949       if (screen->is_format_supported(screen, template->format,
950                                       template->target,
951                                       template->nr_samples,
952                                       template->nr_storage_samples,
953                                       PIPE_BIND_SAMPLER_VIEW)) {
954          bindings |= PIPE_BIND_SAMPLER_VIEW;
955       }
956    }
957 
958    if (bindings & PIPE_BIND_SAMPLER_VIEW) {
959       tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
960       tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
961 
962       if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
963          /* Also check if the format is color renderable */
964          if (screen->is_format_supported(screen, template->format,
965                                          template->target,
966                                          template->nr_samples,
967                                          template->nr_storage_samples,
968                                          PIPE_BIND_RENDER_TARGET)) {
969             bindings |= PIPE_BIND_RENDER_TARGET;
970          }
971       }
972 
973       if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
974          /* Also check if the format is depth/stencil renderable */
975          if (screen->is_format_supported(screen, template->format,
976                                          template->target,
977                                          template->nr_samples,
978                                          template->nr_storage_samples,
979                                          PIPE_BIND_DEPTH_STENCIL)) {
980             bindings |= PIPE_BIND_DEPTH_STENCIL;
981          }
982       }
983    }
984 
985    if (bindings & PIPE_BIND_DISPLAY_TARGET) {
986       tex->key.cachable = 0;
987    }
988 
989    if (bindings & PIPE_BIND_SHARED) {
990       tex->key.cachable = 0;
991    }
992 
993    if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
994       tex->key.scanout = 1;
995       tex->key.cachable = 0;
996    }
997 
998    /*
999     * Note: Previously we never passed the
1000     * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
1001     * know beforehand whether a texture will be used as a rendertarget or not
1002     * and it always requests PIPE_BIND_RENDER_TARGET, therefore
1003     * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
1004     *
1005     * However, this was changed since other gallium frontends
1006     * (XA for example) uses it accurately and certain device versions
1007     * relies on it in certain situations to render correctly.
1008     */
1009    if ((bindings & PIPE_BIND_RENDER_TARGET) &&
1010        !util_format_is_s3tc(template->format)) {
1011       tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1012       tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1013    }
1014 
1015    if (bindings & PIPE_BIND_DEPTH_STENCIL) {
1016       tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
1017       tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
1018    }
1019 
1020    tex->key.numMipLevels = template->last_level + 1;
1021 
1022    tex->key.format = svga_translate_format(svgascreen, template->format,
1023                                            bindings);
1024    if (tex->key.format == SVGA3D_FORMAT_INVALID) {
1025       goto fail;
1026    }
1027 
1028    bool use_typeless = false;
1029    if (svgascreen->sws->have_gl43) {
1030       /* Do not use typeless for SHARED, SCANOUT or DISPLAY_TARGET surfaces. */
1031       use_typeless = !(bindings & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT |
1032                                    PIPE_BIND_DISPLAY_TARGET));
1033    } else if (svgascreen->sws->have_vgpu10) {
1034       /* For VGPU10 device, use typeless formats only for sRGB and depth resources
1035        * if they do not have SHARED, SCANOUT or DISPLAY_TARGET bind flags
1036        */
1037       use_typeless = (util_format_is_srgb(template->format) ||
1038                       format_has_depth(template->format)) &&
1039                      !(bindings & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT |
1040                                    PIPE_BIND_DISPLAY_TARGET));
1041    }
1042 
1043    if (use_typeless) {
1044       SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
1045       if (0) {
1046          debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
1047                       svga_format_name(tex->key.format),
1048                       svga_format_name(typeless),
1049                       bindings);
1050       }
1051 
1052       if (svga_format_is_uncompressed_snorm(tex->key.format)) {
1053          /* We can't normally render to snorm surfaces, but once we
1054           * substitute a typeless format, we can if the rendertarget view
1055           * is unorm.  This can happen with GL_ARB_copy_image.
1056           */
1057          tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1058          tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1059       }
1060 
1061       tex->key.format = typeless;
1062    }
1063 
1064    if (svgascreen->sws->have_sm5 &&
1065        bindings & (PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) {
1066        if (template->nr_samples < 2 &&
1067            screen->is_format_supported(screen, template->format,
1068                                        template->target,
1069                                        template->nr_samples,
1070                                        template->nr_storage_samples,
1071                                        PIPE_BIND_SHADER_IMAGE)) {
1072           /* Any non multi-samples texture that can be used as a render target
1073            * or sampler view can be bound to an image unit.
1074            * So make sure to set the UAV flag here.
1075            */
1076           tex->key.flags |= SVGA3D_SURFACE_BIND_UAVIEW;
1077        }
1078    }
1079 
1080    SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
1081    bool invalidated;
1082    tex->handle = svga_screen_surface_create(svgascreen, bindings,
1083                                             tex->b.usage,
1084                                             &invalidated, &tex->key);
1085    if (!tex->handle) {
1086       goto fail;
1087    }
1088    if (invalidated) {
1089       tex->surface_state = SVGA_SURFACE_STATE_INVALIDATED;
1090    } else {
1091       tex->surface_state = SVGA_SURFACE_STATE_CREATED;
1092    }
1093 
1094    SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);
1095 
1096    debug_reference(&tex->b.reference,
1097                    (debug_reference_descriptor)debug_describe_resource, 0);
1098 
1099    tex->size = util_resource_size(template);
1100 
1101    /* Determine if texture upload buffer can be used to upload this texture */
1102    tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
1103                                                               &tex->b);
1104 
1105    /* Initialize the backing resource cache */
1106    tex->backed_handle = NULL;
1107 
1108    svgascreen->hud.total_resource_bytes += tex->size;
1109    svgascreen->hud.num_resources++;
1110 
1111    SVGA_STATS_TIME_POP(svgascreen->sws);
1112 
1113    return &tex->b;
1114 
1115 fail:
1116    if (tex->dirty)
1117       FREE(tex->dirty);
1118    if (tex->defined)
1119       FREE(tex->defined);
1120    FREE(tex);
1121 fail_notex:
1122    SVGA_STATS_TIME_POP(svgascreen->sws);
1123    return NULL;
1124 }
1125 
1126 
/**
 * Create a svga_texture that wraps a surface imported from another
 * process/driver via a winsys handle (e.g. for DRI buffer sharing).
 *
 * \param screen    the pipe screen
 * \param template  describes the expected resource layout; only 2D/RECT,
 *                  single-level, depth==1 resources are accepted here
 * \param whandle   the winsys handle identifying the shared surface
 * \return the wrapping pipe_resource, or NULL on failure.
 *
 * On failure, the imported surface reference is released and no resource
 * is leaked (see the goto cleanup chain at the bottom).
 */
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only supports one type */
   if ((template->target != PIPE_TEXTURE_2D &&
       template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   /* Resolve the handle to a host surface; also reports the surface's
    * actual device format, which may differ from template->format.
    */
   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   /* Reject the import if the device format isn't compatible with the
    * gallium format/bind flags the caller expects.
    */
   if (!svga_format_is_shareable(ss, template->format, format,
                                 template->bind, true))
      goto out_unref;

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      goto out_unref;

   /* Per-slice "defined" flags; depth0 is 1 here but array_size may not be. */
   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined)
      goto out_no_defined;

   tex->b = *template;
   pipe_reference_init(&tex->b.reference, 1);
   tex->b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   /* Shared surfaces must not go through the host surface cache. */
   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;   /* take ownership of the imported surface ref */


   /* set bind flags for the imported texture handle according to the bind
    * flags in the template
    */
   if (template->bind & PIPE_BIND_RENDER_TARGET){
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (template->bind & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   if (template->bind & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
   }

   /* Single level/slice, so one dirty flag suffices. */
   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto out_no_dirty;

   tex->imported = true;

   ss->hud.num_resources++;

   return &tex->b;

out_no_dirty:
   FREE(tex->defined);
out_no_defined:
   FREE(tex);
out_unref:
   sws->surface_reference(sws, &srf, NULL);
   return NULL;
}
1212 
/**
 * Generate mipmap levels for a texture using the device's GenMips command
 * (VGPU10 only).
 *
 * \return true if the hardware path was used, false if the caller should
 *         fall back to the software mipmap-generation utility.
 */
bool
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   /* NOTE(review): templ is only partially initialized below (format/target/
    * u.tex fields); presumably create_sampler_view ignores the rest for this
    * use — confirm against svga_create_sampler_view.
    */
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);

   assert(svga_have_vgpu10(svga));

   /* Fallback to the mipmap generation utility for those formats that
    * do not support hw generate mipmap
    */
   if (!svga_format_support_gen_mips(format))
      return false;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return false;

   templ.format = format;
   templ.target = pt->target;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   if (pt->target == PIPE_TEXTURE_CUBE) {
      /**
       * state tracker generates mipmap one face at a time.
       * But SVGA generates mipmap for the entire cubemap.
       */
      templ.u.tex.first_layer = 0;
      templ.u.tex.last_layer = 5;
   }

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return false;

   sv = svga_pipe_sampler_view(psv);
   /* Ensure a device sampler-view id exists before referencing it below;
    * SVGA_RETRY re-issues the call after a command-buffer flush on OOM.
    */
   SVGA_RETRY(svga, svga_validate_pipe_sampler_view(svga, sv));

   SVGA_RETRY(svga, SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle));
   pipe_sampler_view_reference(&psv, NULL);

   /* Mark the texture surface as RENDERED */
   svga_set_texture_rendered_to(tex);

   svga->hud.num_generate_mipmap++;

   return true;
}
1274 
1275 
1276 /* texture upload buffer default size in bytes */
1277 #define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)
1278 
1279 /**
1280  * Create a texture upload buffer
1281  */
1282 bool
svga_texture_transfer_map_upload_create(struct svga_context * svga)1283 svga_texture_transfer_map_upload_create(struct svga_context *svga)
1284 {
1285    svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
1286                                       PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 0);
1287    if (svga->tex_upload)
1288       u_upload_disable_persistent(svga->tex_upload);
1289 
1290    return svga->tex_upload != NULL;
1291 }
1292 
1293 
1294 /**
1295  * Destroy the texture upload buffer
1296  */
1297 void
svga_texture_transfer_map_upload_destroy(struct svga_context * svga)1298 svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
1299 {
1300    u_upload_destroy(svga->tex_upload);
1301 }
1302 
1303 
1304 /**
1305  * Returns true if this transfer map request can use the upload buffer.
1306  */
1307 bool
svga_texture_transfer_map_can_upload(const struct svga_screen * svgascreen,const struct pipe_resource * texture)1308 svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
1309                                      const struct pipe_resource *texture)
1310 {
1311    if (svgascreen->sws->have_transfer_from_buffer_cmd == false)
1312       return false;
1313 
1314    /* TransferFromBuffer command is not well supported with multi-samples surface */
1315    if (texture->nr_samples > 1)
1316       return false;
1317 
1318    if (util_format_is_compressed(texture->format)) {
1319       /* XXX Need to take a closer look to see why texture upload
1320        * with 3D texture with compressed format fails
1321        */
1322       if (texture->target == PIPE_TEXTURE_3D)
1323           return false;
1324    }
1325    else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
1326       return false;
1327    }
1328 
1329    return true;
1330 }
1331 
1332 
1333 /**
1334  *  Return TRUE if the same texture is bound to the specified
1335  *  surface view and a backing resource is created for the surface view.
1336  */
1337 static bool
need_update_texture_resource(struct pipe_surface * surf,struct svga_texture * tex)1338 need_update_texture_resource(struct pipe_surface *surf,
1339 		             struct svga_texture *tex)
1340 {
1341    struct svga_texture *stex = svga_texture(surf->texture);
1342    struct svga_surface *s = svga_surface(surf);
1343 
1344    return (stex == tex && s->handle != tex->handle);
1345 }
1346 
1347 
1348 /**
1349  *  Make sure the texture resource is up-to-date. If the texture is
1350  *  currently bound to a render target view and a backing resource is
1351  *  created, we will need to update the original resource with the
1352  *  changes in the backing resource.
1353  */
1354 static void
svga_validate_texture_resource(struct svga_context * svga,struct svga_texture * tex)1355 svga_validate_texture_resource(struct svga_context *svga,
1356 		               struct svga_texture *tex)
1357 {
1358    if (svga_was_texture_rendered_to(tex) == false)
1359       return;
1360 
1361    if ((svga->state.hw_draw.has_backed_views == false) ||
1362        (tex->backed_handle == NULL))
1363       return;
1364 
1365    struct pipe_surface *s;
1366    for (unsigned i = 0; i < svga->state.hw_clear.num_rendertargets; i++) {
1367       s = svga->state.hw_clear.rtv[i];
1368       if (s && need_update_texture_resource(s, tex))
1369          svga_propagate_surface(svga, s, true);
1370    }
1371 
1372    s = svga->state.hw_clear.dsv;
1373    if (s && need_update_texture_resource(s, tex))
1374       svga_propagate_surface(svga, s, true);
1375 }
1376 
1377 
/**
 * Use upload buffer for the transfer map request.
 *
 * Allocates space in the context's texture upload buffer and records the
 * destination box/layer info in st->upload so that
 * svga_texture_transfer_unmap_upload() can later issue the
 * TransferFromBuffer command(s).
 *
 * \return a CPU pointer into the upload buffer for the caller to write
 *         texel data into, or NULL if this request cannot use the upload
 *         path (caller falls back to another transfer mechanism).
 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   struct svga_texture *tex = svga_texture(texture);
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   /* Validate the texture resource in case there is any changes
    * in the backing resource that needs to be updated to the original
    * texture resource first before the transfer upload occurs, otherwise,
    * the later update from backing resource to original will overwrite the
    * changes in this transfer map update.
    */
   svga_validate_texture_resource(svga, tex);

   /* Start with the destination box exactly as requested by the caller. */
   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

   /* For array/cube targets the gallium "depth" coordinate selects layers,
    * not a 3D depth; convert it into a per-layer transfer description.
    */
   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      /* The face index is carried in the subresource, not the box. */
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      /* For 1D arrays the layer comes in via box.y/z. */
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   /* Compute tight strides in format blocks for the staged data. */
   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the
    * texture content from the buffer, the layer stride for a multi-layers
    * surface needs to be in multiples of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#if MESA_DEBUG
   if (util_format_is_compressed(texture->format)) {
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* dest box must start on block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      return NULL;
   }

   /* Remember the staging allocation for the unmap/submit step. */
   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}
1475 
1476 
/**
 * Unmap upload map transfer request.
 *
 * Unmaps the staging buffer filled by svga_texture_transfer_map_upload()
 * and issues one TransferFromBuffer command per layer to copy the staged
 * data into the destination texture surface.
 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   /* Source is the staging buffer's surface; destination is the texture. */
   srcsurf = svga_buffer_handle(svga, st->upload.buf, 0);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

   /* Copy each staged layer to its subresource (layer * numMips + level). */
   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a transferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      SVGA_RETRY(svga, SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                        offset,
                                                        st->base.stride,
                                                        st->base.layer_stride,
                                                        dstsurf, subResource,
                                                        &st->upload.box));
      offset += st->base.layer_stride;
   }

   /* Mark the texture surface state as RENDERED */
   svga_set_texture_rendered_to(tex);

   /* Drop our reference to the staging buffer allocation. */
   pipe_resource_reference(&st->upload.buf, NULL);
}
1525 
1526 /**
1527  * Does the device format backing this surface have an
1528  * alpha channel?
1529  *
1530  * \param texture[in]  The texture whose format we're querying
1531  * \return TRUE if the format has an alpha channel, FALSE otherwise
1532  *
1533  * For locally created textures, the device (svga) format is typically
1534  * identical to svga_format(texture->format), and we can use the gallium
1535  * format tests to determine whether the device format has an alpha channel
1536  * or not. However, for textures backed by imported svga surfaces that is
1537  * not always true, and we have to look at the SVGA3D utilities.
1538  */
1539 bool
svga_texture_device_format_has_alpha(struct pipe_resource * texture)1540 svga_texture_device_format_has_alpha(struct pipe_resource *texture)
1541 {
1542    /* the svga_texture() call below is invalid for PIPE_BUFFER resources */
1543    assert(texture->target != PIPE_BUFFER);
1544 
1545    const struct SVGA3dSurfaceDesc *surf_desc =
1546       vmw_surf_get_desc(svga_texture(texture)->key.format);
1547 
1548    enum SVGA3dBlockDesc block_desc = surf_desc->blockDesc;
1549 
1550    return !!((block_desc & SVGA3DBLOCKDESC_ALPHA) ||
1551              ((block_desc == SVGA3DBLOCKDESC_TYPELESS) &&
1552               (surf_desc->bitDepth.alpha > 0)));
1553 }
1554