/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga3d_reg.h"
#include "svga3d_surfacedefs.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_upload_mgr.h"

#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_screen.h"
#include "svga_context.h"
#include "svga_resource_texture.h"
#include "svga_resource_buffer.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"


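/**
 * Issue a DMA transfer of a single band between the transfer's hardware
 * buffer and the host surface, described by one SVGA3dCopyBox.
 */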
static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;

   assert(!st->use_direct_map);

   box.x = x;
   box.y = y;
   box.z = z;
   box.w = w;
   box.h = h;
   box.d = d;
   box.srcx = srcx;
   box.srcy = srcy;
   box.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            x,
            y,
            z,
            x + w,
            y + h,
            z + 1,
            util_format_get_blocksize(texture->b.format) * 8 /
            (util_format_get_blockwidth(texture->b.format)
             * util_format_get_blockheight(texture->b.format)));

   SVGA_RETRY(svga, SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags));
}


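/**
 * Perform a complete DMA transfer between the host surface and the
 * transfer's buffer.  If the data does not fit in the hardware buffer,
 * it is staged through a software buffer and transferred in bands.
 */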
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->box.x, st->box.y, st->box.z,
                             st->box.w, st->box.h, st->box.d,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->box.h; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         if (y + h > st->box.h)
            h = st->box.h - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_MAP_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
            }

            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->box.x, y, st->box.z,
                                st->box.w, h, st->box.d,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents from being discarded on the next
          * band upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}



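/**
 * Return a winsys handle for the texture's backing surface so it can be
 * shared with other processes or APIs.  Buffer resources are not
 * supported, and the surface is marked non-cachable since it is now
 * shared.
 */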
bool
svga_resource_get_handle(struct pipe_screen *screen,
                         struct pipe_context *context,
                         struct pipe_resource *texture,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
   unsigned stride;

   if (texture->target == PIPE_BUFFER)
      return false;

   assert(svga_texture(texture)->key.cachable == 0);
   svga_texture(texture)->key.cachable = 0;

   stride = util_format_get_nblocksx(texture->format, texture->width0) *
            util_format_get_blocksize(texture->format);

   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
                                  stride, whandle);
}


/**
 * Determine if we need to read back a texture image before mapping it.
 */
static inline boolean
need_tex_readback(struct svga_transfer *st)
{
   if (st->base.usage & PIPE_MAP_READ)
      return TRUE;

   if ((st->base.usage & PIPE_MAP_WRITE) &&
       ((st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) == 0)) {
      return svga_was_texture_rendered_to(svga_texture(st->base.resource));
   }

   return FALSE;
}


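/**
 * Issue a readback of the whole guest-backed surface and mark its
 * surface state as updated.
 */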
static void
readback_texture_surface(struct svga_context *svga,
                         struct svga_texture *tex,
                         struct svga_winsys_surface *surf)
{
   SVGA_RETRY(svga, SVGA3D_ReadbackGBSurface(svga->swc, surf));

   /* Mark the texture surface as UPDATED */
   tex->surface_state = SVGA_SURFACE_STATE_UPDATED;

   svga->hud.num_readbacks++;
   SVGA_STATS_COUNT_INC(svga_sws(svga), SVGA_STATS_COUNT_TEXREADBACK);
}

/**
 * Use DMA for the transfer request
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->box.w);
   nblocksy = util_format_get_nblocksy(texture->format, st->box.h);
   d = st->box.d;

   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_MAP_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}


/**
 * Use direct map for the transfer request
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy;
   unsigned usage = st->base.usage;

   if (need_tex_readback(st)) {
      svga_surfaces_flush(svga);

      if (!svga->swc->force_coherent || tex->imported) {
         /* Readback the whole surface */
         readback_texture_surface(svga, tex, surf);

         svga_context_finish(svga);
      }
      /*
       * Note: if PIPE_MAP_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex);
   }
   else {
      assert(usage & PIPE_MAP_WRITE);
      if ((usage & PIPE_MAP_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_dirty(tex, st->slice, level)) {
            /*
             * do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      boolean retry, rebind;
      unsigned offset, mip_width, mip_height;
      struct svga_winsys_context *swc = svga->swc;

      if (swc->force_coherent) {
         usage |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
      }

      map = SVGA_TRY_MAP(svga->swc->surface_map
                         (svga->swc, surf, usage, &retry, &rebind), retry);

      if (map == NULL && retry) {
         /*
          * At this point, the svga_surfaces_flush() should already have
          * been called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_retry_enter(svga);
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry, &rebind);
         svga_retry_exit(svga);
      }
      if (map && rebind) {
         enum pipe_error ret;

         ret = SVGA3D_BindGBSurface(swc, surf);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BindGBSurface(swc, surf);
            assert(ret == PIPE_OK);
         }
         svga_context_flush(svga, NULL);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /**
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.width0;
      baseLevelSize.height = tex->b.height0;
      baseLevelSize.depth = tex->b.depth0;

      if ((tex->b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_2D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.last_level + 1, 1, 0);
      }

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.width0, level);
      mip_height = u_minify(tex->b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               st->box.x,
                                               st->box.y,
                                               st->box.z);

      return (void *) (map + offset);
   }
}


/**
 * Request a transfer map to the texture resource
 */
void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
       (!svga_have_gb_dma(svga) || (usage & PIPE_MAP_WRITE));
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* The modified transfer map box with the array index removed from z.
    * The array index is specified in slice.
    */
   st->box.x = box->x;
   st->box.y = box->y;
   st->box.z = box->z;
   st->box.w = box->width;
   st->box.h = box->height;
   st->box.d = box->depth;

   switch (tex->b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transferring multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* We never want to use DMA transfers on systems with GB objects because
    * they cause serialization issues, and in SVGAv3 VRAM is gone, which
    * makes it impossible to support both at the same time.
    */
   if (svga_have_gb_objects(svga)) {
      use_direct_map = TRUE;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer and there are no pending primitives, clear
    * the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf) &&
       (svga_have_vgpu10(svga) ||
        !svga_hwtnl_has_pending_prim(svga->hwtnl))) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_MAP_READ);
      boolean was_rendered_to =
         svga_was_texture_rendered_to(svga_texture(texture));

      /* If the texture was already rendered to and the upload buffer
       * is supported, use the upload buffer to avoid reading back the
       * texture content; otherwise, first try to map directly to the GB
       * surface and, if that blocks, fall back to the upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try to map directly to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_MAP_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* if direct map with DONTBLOCK fails, then try upload to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_MAP_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->box.d;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}

/**
 * Unmap a GB texture surface.
 */
static void
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
{
   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
   struct svga_winsys_context *swc = svga->swc;
   boolean rebind;

   assert(surf);

   swc->surface_unmap(swc, surf, &rebind);
   if (rebind) {
      SVGA_RETRY(svga, SVGA3D_BindGBSurface(swc, surf));
   }
}


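/**
 * Issue an update command for one image region of a guest-backed surface
 * (pre-VGPU10 path).
 */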
static void
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   SVGA_RETRY(svga, SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level));
}


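/**
 * Issue an update command for one subresource region of a guest-backed
 * surface (VGPU10 path).  The subresource index is computed from the
 * slice, mip level and number of mip levels.
 */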
static void
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned slice,
                    unsigned level,
                    unsigned numMipLevels)
{
   unsigned subResource;

   subResource = slice * numMipLevels + level;

   SVGA_RETRY(svga, SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box,
                                                    subResource));
}


/**
 * Unmap a DMA transfer request.
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   if (!st->swbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_MAP_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;
      struct pipe_resource *texture = st->base.resource;
      struct svga_texture *tex = svga_texture(texture);


      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_MAP_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
      svga_set_texture_rendered_to(tex);
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}


/**
 * Unmap a direct map transfer request.
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_MAP_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      SVGA3dBox box = st->box;
      unsigned nlayers;

      switch (tex->b.target) {
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.d = 1;
         break;
      default:
         nlayers = 1;
         break;
      }


      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (!svga->swc->force_coherent || tex->imported) {
         if (svga_have_vgpu10(svga)) {
            unsigned i;

            for (i = 0; i < nlayers; i++) {
               update_image_vgpu10(svga, surf, &box,
                                   st->slice + i, transfer->level,
                                   tex->b.last_level + 1);
            }
         } else {
            assert(nlayers == 1);
            update_image_vgpu9(svga, surf, &box, st->slice,
                               transfer->level);
         }
      }

      /* Mark the texture surface state as UPDATED */
      tex->surface_state = SVGA_SURFACE_STATE_UPDATED;
   }
}


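/**
 * Unmap a texture transfer, dispatching to the DMA, upload buffer or
 * direct map unmap path, and update the dirty/timestamp state for
 * write transfers.
 */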
void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_MAP_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}


/**
 * Does format store depth values?
 */
static inline boolean
format_has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}

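/**
 * Create a new texture resource: translate the gallium template into an
 * SVGA3D surface key (target, bind flags, format, mip/array sizes) and
 * allocate the backing host surface.
 */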
struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_texture *tex;
   unsigned bindings = template->bind;

   SVGA_STATS_TIME_PUSH(svgascreen->sws,
                        SVGA_STATS_TIME_CREATETEXTURE);

   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
      goto fail_notex;
   }

   /* Verify the number of mipmap levels isn't impossibly large.  For example,
    * if the base 2D image is 16x16, we can't have 8 mipmap levels.
    * The gallium frontend should never ask us to create a resource with
    * invalid parameters.
    */
   {
      unsigned max_dim = template->width0;

      switch (template->target) {
      case PIPE_TEXTURE_1D:
      case PIPE_TEXTURE_1D_ARRAY:
         // nothing
         break;
      case PIPE_TEXTURE_2D:
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_2D_ARRAY:
         max_dim = MAX2(max_dim, template->height0);
         break;
      case PIPE_TEXTURE_3D:
         max_dim = MAX3(max_dim, template->height0, template->depth0);
         break;
      case PIPE_TEXTURE_RECT:
      case PIPE_BUFFER:
         assert(template->last_level == 0);
         /* the assertion below should always pass */
         break;
      default:
         debug_printf("Unexpected texture target type\n");
      }
      assert(1 << template->last_level <= max_dim);
   }


   tex = CALLOC_STRUCT(svga_texture);
   if (!tex) {
      goto fail_notex;
   }

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      goto fail_notex;
   }

   tex->dirty = CALLOC(template->depth0 * template->array_size,
                             sizeof(tex->dirty[0]));
   if (!tex->dirty) {
      goto fail;
   }

   tex->b = *template;
   pipe_reference_init(&tex->b.reference, 1);
   tex->b.screen = screen;

   tex->key.flags = 0;
   tex->key.size.width = template->width0;
   tex->key.size.height = template->height0;
   tex->key.size.depth = template->depth0;
   tex->key.arraySize = 1;
   tex->key.numFaces = 1;

   /* nr_samples=1 must be treated as a non-multisample texture */
   if (tex->b.nr_samples == 1) {
      tex->b.nr_samples = 0;
   }
   else if (tex->b.nr_samples > 1) {
      assert(svgascreen->sws->have_sm4_1);
      tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
   }

   tex->key.sampleCount = tex->b.nr_samples;

   if (svgascreen->sws->have_vgpu10) {
      switch (template->target) {
      case PIPE_TEXTURE_1D:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         FALLTHROUGH;
      case PIPE_TEXTURE_2D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
         tex->key.arraySize = template->array_size;
         break;
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 6;
         break;
      case PIPE_TEXTURE_CUBE_ARRAY:
         assert(svgascreen->sws->have_sm4_1);
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 1;  // arraySize already includes the 6 faces
         tex->key.arraySize = template->array_size;
         break;
      default:
         break;
      }
   }
   else {
      switch (template->target) {
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }

   tex->key.cachable = 1;

   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
      /* Also check if the format can be sampled from */
      if (screen->is_format_supported(screen, template->format,
                                      template->target,
                                      template->nr_samples,
                                      template->nr_storage_samples,
                                      PIPE_BIND_SAMPLER_VIEW)) {
         bindings |= PIPE_BIND_SAMPLER_VIEW;
      }
   }

   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
         /* Also check if the format is color renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         template->nr_storage_samples,
                                         PIPE_BIND_RENDER_TARGET)) {
            bindings |= PIPE_BIND_RENDER_TARGET;
         }
      }

      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
         /* Also check if the format is depth/stencil renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         template->nr_storage_samples,
                                         PIPE_BIND_DEPTH_STENCIL)) {
            bindings |= PIPE_BIND_DEPTH_STENCIL;
         }
      }
   }

   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
      tex->key.cachable = 0;
   }

   if (bindings & PIPE_BIND_SHARED) {
      tex->key.cachable = 0;
   }

   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
      tex->key.scanout = 1;
      tex->key.cachable = 0;
   }

   /*
    * Note: Previously we never passed the
    * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
    * know beforehand whether a texture will be used as a rendertarget or not
    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
    *
    * However, this was changed since other gallium frontends
    * (XA for example) use it accurately and certain device versions
    * rely on it in certain situations to render correctly.
    */
   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
       !util_format_is_s3tc(template->format)) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   tex->key.numMipLevels = template->last_level + 1;

   tex->key.format = svga_translate_format(svgascreen, template->format,
                                           bindings);
   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
      goto fail;
   }

   bool use_typeless = FALSE;
   if (svgascreen->sws->have_gl43) {
      /* Do not use typeless for SHARED, SCANOUT or DISPLAY_TARGET surfaces. */
      use_typeless = !(bindings & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT |
                                   PIPE_BIND_DISPLAY_TARGET));
   } else if (svgascreen->sws->have_vgpu10) {
      /* For VGPU10 device, use typeless formats only for sRGB and depth resources
       * if they do not have SHARED, SCANOUT or DISPLAY_TARGET bind flags
       */
      use_typeless = (util_format_is_srgb(template->format) ||
                      format_has_depth(template->format)) &&
                     !(bindings & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT |
                                   PIPE_BIND_DISPLAY_TARGET));
   }

   if (use_typeless) {
      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
      if (0) {
         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                      svga_format_name(tex->key.format),
                      svga_format_name(typeless),
                      bindings);
      }

      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
         /* We can't normally render to snorm surfaces, but once we
          * substitute a typeless format, we can if the rendertarget view
          * is unorm.  This can happen with GL_ARB_copy_image.
          */
         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
      }

      tex->key.format = typeless;
   }

   if (svgascreen->sws->have_sm5 &&
       bindings & (PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) {
       if (template->nr_samples < 2 &&
           screen->is_format_supported(screen, template->format,
                                       template->target,
                                       template->nr_samples,
                                       template->nr_storage_samples,
                                       PIPE_BIND_SHADER_IMAGE)) {
          /* Any non-multisample texture that can be used as a render target
           * or sampler view can be bound to an image unit.
           * So make sure to set the UAV flag here.
           */
          tex->key.flags |= SVGA3D_SURFACE_BIND_UAVIEW;
       }
   }

   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
   boolean invalidated;
   tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                            tex->b.usage,
                                            &invalidated, &tex->key);
   if (!tex->handle) {
      goto fail;
   }
   if (invalidated) {
      tex->surface_state = SVGA_SURFACE_STATE_INVALIDATED;
   } else {
      tex->surface_state = SVGA_SURFACE_STATE_CREATED;
   }

   SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);

   debug_reference(&tex->b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   tex->size = util_resource_size(template);

   /* Determine if texture upload buffer can be used to upload this texture */
   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
                                                              &tex->b);

   /* Initialize the backing resource cache */
   tex->backed_handle = NULL;

   svgascreen->hud.total_resource_bytes += tex->size;
   svgascreen->hud.num_resources++;

   SVGA_STATS_TIME_POP(svgascreen->sws);

   return &tex->b;

fail:
   if (tex->dirty)
      FREE(tex->dirty);
   if (tex->defined)
      FREE(tex->defined);
   FREE(tex);
fail_notex:
   SVGA_STATS_TIME_POP(svgascreen->sws);
   return NULL;
}


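/**
 * Create a texture resource that wraps an existing winsys surface
 * imported through a winsys handle.
 */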
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only 2D and RECT textures with a single mipmap level and depth 1
    * are supported.
    */
   if ((template->target != PIPE_TEXTURE_2D &&
       template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   if (!svga_format_is_shareable(ss, template->format, format,
                                 template->bind, true))
      goto out_unref;

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      goto out_unref;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined)
      goto out_no_defined;

   tex->b = *template;
   pipe_reference_init(&tex->b.reference, 1);
   tex->b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;


   /* set bind flags for the imported texture handle according to the bind
    * flags in the template
    */
   if (template->bind & PIPE_BIND_RENDER_TARGET) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (template->bind & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   if (template->bind & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
   }

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto out_no_dirty;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b;

out_no_dirty:
   FREE(tex->defined);
out_no_defined:
   FREE(tex);
out_unref:
   sws->surface_reference(sws, &srf, NULL);
   return NULL;
}

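/**
 * Generate mipmap levels of a texture on the device with the VGPU10
 * GenMips command.  Returns false if the format or surface does not
 * support hardware mipmap generation, so that the caller can fall back
 * to the software mipmap utility.
 */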
bool
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);

   assert(svga_have_vgpu10(svga));

   /* Fall back to the mipmap generation utility for formats that
    * do not support hw mipmap generation.
    */
   if (!svga_format_support_gen_mips(format))
      return false;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return false;

   templ.format = format;
   templ.target = pt->target;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   if (pt->target == PIPE_TEXTURE_CUBE) {
      /**
       * The state tracker generates mipmaps one face at a time,
       * but SVGA generates mipmaps for the entire cubemap.
       */
      templ.u.tex.first_layer = 0;
      templ.u.tex.last_layer = 5;
   }

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return false;

   sv = svga_pipe_sampler_view(psv);
   SVGA_RETRY(svga, svga_validate_pipe_sampler_view(svga, sv));

   SVGA_RETRY(svga, SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle));
   pipe_sampler_view_reference(&psv, NULL);

   /* Mark the texture surface as RENDERED */
   svga_set_texture_rendered_to(tex);

   svga->hud.num_generate_mipmap++;

   return true;
}


/* texture upload buffer default size in bytes */
#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)

/**
 * Create a texture upload buffer
 */
boolean
svga_texture_transfer_map_upload_create(struct svga_context *svga)
{
   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
                                      PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 0);
   if (svga->tex_upload)
      u_upload_disable_persistent(svga->tex_upload);

   return svga->tex_upload != NULL;
}


/**
 * Destroy the texture upload buffer
 */
void
svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
{
   u_upload_destroy(svga->tex_upload);
}


/**
 * Returns true if this transfer map request can use the upload buffer.
 */
boolean
svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
                                     const struct pipe_resource *texture)
{
   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
      return FALSE;

   /* TransferFromBuffer command is not well supported with multisample surfaces */
   if (texture->nr_samples > 1)
      return FALSE;

   if (util_format_is_compressed(texture->format)) {
      /* XXX Need to take a closer look to see why texture upload
       * of compressed 3D textures fails.
       */
      if (texture->target == PIPE_TEXTURE_3D)
          return FALSE;
   }
   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
      return FALSE;
   }

   return TRUE;
}


/**
 * Use upload buffer for the transfer map request.
 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the
    * texture content from the buffer, the layer stride for a multi-layer
    * surface needs to be a multiple of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#ifdef DEBUG
   if (util_format_is_compressed(texture->format)) {
      struct svga_texture *tex = svga_texture(texture);
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* dest box must start on block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      return NULL;
   }

   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}


/**
 * Unmap an upload buffer transfer request.
 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   srcsurf = svga_buffer_handle(svga, st->upload.buf, 0);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a transferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      SVGA_RETRY(svga, SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                        offset,
                                                        st->base.stride,
                                                        st->base.layer_stride,
                                                        dstsurf, subResource,
                                                        &st->upload.box));
      offset += st->base.layer_stride;
   }

   /* Mark the texture surface state as RENDERED */
   svga_set_texture_rendered_to(tex);

   pipe_resource_reference(&st->upload.buf, NULL);
}

/**
 * Does the device format backing this surface have an
 * alpha channel?
 *
 * \param texture[in]  The texture whose format we're querying
 * \return TRUE if the format has an alpha channel, FALSE otherwise
 *
 * For locally created textures, the device (svga) format is typically
 * identical to svga_format(texture->format), and we can use the gallium
 * format tests to determine whether the device format has an alpha channel
 * or not. However, for textures backed by imported svga surfaces that is
 * not always true, and we have to look at the SVGA3D utilities.
 */
boolean
svga_texture_device_format_has_alpha(struct pipe_resource *texture)
{
   /* the svga_texture() call below is invalid for PIPE_BUFFER resources */
   assert(texture->target != PIPE_BUFFER);

   const struct svga3d_surface_desc *surf_desc =
      svga3dsurface_get_desc(svga_texture(texture)->key.format);

   enum svga3d_block_desc block_desc = surf_desc->block_desc;

   return !!((block_desc & SVGA3DBLOCKDESC_ALPHA) ||
             ((block_desc == SVGA3DBLOCKDESC_TYPELESS) &&
              (surf_desc->bitDepth.alpha > 0)));
}