/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}
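
/* Illustrative sketch (not part of the original source): object handles are
 * guest-assigned from a single monotonically increasing counter, so every
 * host object created through this file gets a unique id regardless of its
 * type:
 *
 *    uint32_t blend_handle = virgl_object_assign_handle();
 *    uint32_t dsa_handle   = virgl_object_assign_handle();
 *    // blend_handle != dsa_handle; the actual values depend on how many
 *    // handles were assigned before, since the counter is shared.
 */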

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}
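
/* A minimal usage sketch (hypothetical caller, not from this file): code
 * that wants to swap the storage backing a buffer would check
 * rebindability first, since sampler views and streamout targets keep
 * host-side references that cannot be updated afterwards:
 *
 *    if (virgl_can_rebind_resource(vctx, res)) {
 *       // ... replace the hw resource backing res ...
 *       virgl_rebind_resource(vctx, res);
 *    }
 */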

void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER |
                                           PIPE_BIND_SHADER_BUFFER |
                                           PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}
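
/* Note on the mask-walking idiom used above: u_bit_scan() clears and
 * returns the index of the lowest set bit, so each loop visits exactly the
 * enabled slots and terminates when the mask reaches zero. A standalone
 * sketch with example values:
 *
 *    uint32_t mask = (1 << 1) | (1 << 3);   // slots 1 and 3 enabled
 *    while (mask) {
 *       int i = u_bit_scan(&mask);          // yields 1, then 3
 *       // re-encode the binding in slot i
 *    }
 */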

static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}
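
/* Sketch of the flow this enables (all names are from this file): a flush
 * resets the per-cbuf draw counter, so the first draw or clear afterwards
 * re-emits every attached resource into the fresh command buffer:
 *
 *    virgl_flush_eq(vctx, vctx, NULL);      // sets vctx->num_draws = 0
 *    // ... later, in virgl_draw_vbo() / virgl_clear():
 *    if (!vctx->num_draws)
 *       virgl_reemit_draw_resources(vctx);
 *    vctx->num_draws++;
 */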

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   assert(rs_state->depth_clip_near ||
          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}
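
/* Worked example of the binding duplication above (illustrative values):
 * with two elements that both source vertex buffer 0, where element 1 has
 * an instance divisor, the elements are rewritten to one binding each and
 * binding_map records how to rebuild the buffer list later:
 *
 *    // input:  elements[0].vertex_buffer_index == 0
 *    //         elements[1].vertex_buffer_index == 0
 *    // output: new_elements[0].vertex_buffer_index == 0
 *    //         new_elements[1].vertex_buffer_index == 1
 *    //         state->binding_map == { 0, 0 }, state->num_bindings == 2
 *    // so virgl_hw_set_vertex_buffers() emits vertex_buffer[0] twice,
 *    // once per binding.
 */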

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}
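
/* Note: the two paths above are deliberate. A resource-backed constant
 * buffer is bound by reference, while a user pointer (or a NULL buf) has
 * its contents written inline into the command stream. A hypothetical
 * caller sketch (my_floats is a made-up name, not from this file):
 *
 *    struct pipe_constant_buffer cb = {
 *       .user_buffer = my_floats,            // takes the inline path
 *       .buffer_size = sizeof(my_floats),
 *    };
 *    ctx->set_constant_buffer(ctx, PIPE_SHADER_FRAGMENT, 0, &cb);
 */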

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}


static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const struct pipe_scissor_state *scissor_state,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vres = virgl_resource(res);

   virgl_encode_clear_texture(vctx, vres, level, box, data);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}
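
/* Sketch of the user-index path above: when the state tracker draws with
 * indices in plain client memory, they are copied into a GPU-visible
 * buffer first, since the host can only read guest resources:
 *
 *    // ib.user_buffer != NULL, so:
 *    u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
 *                  ib.user_buffer, &ib.offset, &ib.buffer);
 *    // ib.buffer now names a real resource at ib.offset, and
 *    // ib.user_buffer is cleared so the encoder takes the buffer path.
 */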

static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}
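
/* Debugging aid: with VIRGL_DEBUG=sync set in the environment, the branch
 * above makes every submission synchronous, serializing guest and host so
 * host-side errors can be attributed to the guest call that caused them.
 * Shell usage (illustrative):
 *
 *    VIRGL_DEBUG=sync glxgears
 */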

static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}
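
/* Summary sketch of the command-buffer lifecycle handled above (all names
 * are from this file):
 *
 *    // 1. nothing encoded, no queued transfers, no fence wanted: no-op
 *    // 2. otherwise queued transfers are encoded into cbuf and it is
 *    //    submitted via virgl_submit_cmd()
 *    // 3. the fresh cbuf is re-primed: VIRGL_MAX_TBUF_DWORDS are
 *    //    re-reserved (when encoded_transfers) and the sub-context id is
 *    //    re-emitted, which is why cbuf_initial_cdw is non-zero
 */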
945 
virgl_flush_from_st(struct pipe_context * ctx,struct pipe_fence_handle ** fence,enum pipe_flush_flags flags)946 static void virgl_flush_from_st(struct pipe_context *ctx,
947                                struct pipe_fence_handle **fence,
948                                enum pipe_flush_flags flags)
949 {
950    struct virgl_context *vctx = virgl_context(ctx);
951 
952    virgl_flush_eq(vctx, vctx, fence);
953 }
954 
virgl_create_sampler_view(struct pipe_context * ctx,struct pipe_resource * texture,const struct pipe_sampler_view * state)955 static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
956                                       struct pipe_resource *texture,
957                                       const struct pipe_sampler_view *state)
958 {
959    struct virgl_context *vctx = virgl_context(ctx);
960    struct virgl_sampler_view *grview;
961    uint32_t handle;
962    struct virgl_resource *res;
963 
964    if (!state)
965       return NULL;
966 
967    grview = CALLOC_STRUCT(virgl_sampler_view);
968    if (!grview)
969       return NULL;
970 
971    res = virgl_resource(texture);
972    handle = virgl_object_assign_handle();
973    virgl_encode_sampler_view(vctx, handle, res, state);
974 
975    grview->base = *state;
976    grview->base.reference.count = 1;
977 
978    grview->base.texture = NULL;
979    grview->base.context = ctx;
980    pipe_resource_reference(&grview->base.texture, texture);
981    grview->handle = handle;
982    return &grview->base;
983 }
984 
virgl_set_sampler_views(struct pipe_context * ctx,enum pipe_shader_type shader_type,unsigned start_slot,unsigned num_views,struct pipe_sampler_view ** views)985 static void virgl_set_sampler_views(struct pipe_context *ctx,
986                                    enum pipe_shader_type shader_type,
987                                    unsigned start_slot,
988                                    unsigned num_views,
989                                    struct pipe_sampler_view **views)
990 {
991    struct virgl_context *vctx = virgl_context(ctx);
992    struct virgl_shader_binding_state *binding =
993       &vctx->shader_bindings[shader_type];
994 
995    binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
996    for (unsigned i = 0; i < num_views; i++) {
997       unsigned idx = start_slot + i;
998       if (views && views[i]) {
999          struct virgl_resource *res = virgl_resource(views[i]->texture);
1000          res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
1001 
1002          pipe_sampler_view_reference(&binding->views[idx], views[i]);
1003          binding->view_enabled_mask |= 1 << idx;
1004       } else {
1005          pipe_sampler_view_reference(&binding->views[idx], NULL);
1006       }
1007    }
1008 
1009    virgl_encode_set_sampler_views(vctx, shader_type,
1010          start_slot, num_views, (struct virgl_sampler_view **)binding->views);
1011    virgl_attach_res_sampler_views(vctx, shader_type);
1012 }
1013 
1014 static void
virgl_texture_barrier(struct pipe_context * ctx,unsigned flags)1015 virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
1016 {
1017    struct virgl_context *vctx = virgl_context(ctx);
1018    struct virgl_screen *rs = virgl_screen(ctx->screen);
1019 
1020    if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
1021        !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
1022       return;
1023    virgl_encode_texture_barrier(vctx, flags);
1024 }
1025 
virgl_destroy_sampler_view(struct pipe_context * ctx,struct pipe_sampler_view * view)1026 static void virgl_destroy_sampler_view(struct pipe_context *ctx,
1027                                  struct pipe_sampler_view *view)
1028 {
1029    struct virgl_context *vctx = virgl_context(ctx);
1030    struct virgl_sampler_view *grview = virgl_sampler_view(view);
1031 
1032    virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
1033    pipe_resource_reference(&view->texture, NULL);
1034    FREE(view);
1035 }
1036 
virgl_create_sampler_state(struct pipe_context * ctx,const struct pipe_sampler_state * state)1037 static void *virgl_create_sampler_state(struct pipe_context *ctx,
1038                                         const struct pipe_sampler_state *state)
1039 {
1040    struct virgl_context *vctx = virgl_context(ctx);
1041    uint32_t handle;
1042 
1043    handle = virgl_object_assign_handle();
1044 
1045    virgl_encode_sampler_state(vctx, handle, state);
1046    return (void *)(unsigned long)handle;
1047 }
1048 
virgl_delete_sampler_state(struct pipe_context * ctx,void * ss)1049 static void virgl_delete_sampler_state(struct pipe_context *ctx,
1050                                       void *ss)
1051 {
1052    struct virgl_context *vctx = virgl_context(ctx);
1053    uint32_t handle = (unsigned long)ss;
1054 
1055    virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
1056 }
1057 
virgl_bind_sampler_states(struct pipe_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned num_samplers,void ** samplers)1058 static void virgl_bind_sampler_states(struct pipe_context *ctx,
1059                                      enum pipe_shader_type shader,
1060                                      unsigned start_slot,
1061                                      unsigned num_samplers,
1062                                      void **samplers)
1063 {
1064    struct virgl_context *vctx = virgl_context(ctx);
1065    uint32_t handles[32];
1066    int i;
1067    for (i = 0; i < num_samplers; i++) {
1068       handles[i] = (unsigned long)(samplers[i]);
1069    }
1070    virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
1071 }
1072 
virgl_set_polygon_stipple(struct pipe_context * ctx,const struct pipe_poly_stipple * ps)1073 static void virgl_set_polygon_stipple(struct pipe_context *ctx,
1074                                      const struct pipe_poly_stipple *ps)
1075 {
1076    struct virgl_context *vctx = virgl_context(ctx);
1077    virgl_encoder_set_polygon_stipple(vctx, ps);
1078 }
1079 
virgl_set_scissor_states(struct pipe_context * ctx,unsigned start_slot,unsigned num_scissor,const struct pipe_scissor_state * ss)1080 static void virgl_set_scissor_states(struct pipe_context *ctx,
1081                                     unsigned start_slot,
1082                                     unsigned num_scissor,
1083                                    const struct pipe_scissor_state *ss)
1084 {
1085    struct virgl_context *vctx = virgl_context(ctx);
1086    virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
1087 }
1088 
virgl_set_sample_mask(struct pipe_context * ctx,unsigned sample_mask)1089 static void virgl_set_sample_mask(struct pipe_context *ctx,
1090                                  unsigned sample_mask)
1091 {
1092    struct virgl_context *vctx = virgl_context(ctx);
1093    virgl_encoder_set_sample_mask(vctx, sample_mask);
1094 }
1095 
virgl_set_min_samples(struct pipe_context * ctx,unsigned min_samples)1096 static void virgl_set_min_samples(struct pipe_context *ctx,
1097                                  unsigned min_samples)
1098 {
1099    struct virgl_context *vctx = virgl_context(ctx);
1100    struct virgl_screen *rs = virgl_screen(ctx->screen);
1101 
1102    if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
1103       return;
1104    virgl_encoder_set_min_samples(vctx, min_samples);
1105 }
1106 
virgl_set_clip_state(struct pipe_context * ctx,const struct pipe_clip_state * clip)1107 static void virgl_set_clip_state(struct pipe_context *ctx,
1108                                 const struct pipe_clip_state *clip)
1109 {
1110    struct virgl_context *vctx = virgl_context(ctx);
1111    virgl_encoder_set_clip_state(vctx, clip);
1112 }
1113 
virgl_set_tess_state(struct pipe_context * ctx,const float default_outer_level[4],const float default_inner_level[2])1114 static void virgl_set_tess_state(struct pipe_context *ctx,
1115                                  const float default_outer_level[4],
1116                                  const float default_inner_level[2])
1117 {
1118    struct virgl_context *vctx = virgl_context(ctx);
1119    struct virgl_screen *rs = virgl_screen(ctx->screen);
1120 
1121    if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
1122       return;
1123    virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
1124 }
1125 
virgl_resource_copy_region(struct pipe_context * ctx,struct pipe_resource * dst,unsigned dst_level,unsigned dstx,unsigned dsty,unsigned dstz,struct pipe_resource * src,unsigned src_level,const struct pipe_box * src_box)1126 static void virgl_resource_copy_region(struct pipe_context *ctx,
1127                                       struct pipe_resource *dst,
1128                                       unsigned dst_level,
1129                                       unsigned dstx, unsigned dsty, unsigned dstz,
1130                                       struct pipe_resource *src,
1131                                       unsigned src_level,
1132                                       const struct pipe_box *src_box)
1133 {
1134    struct virgl_context *vctx = virgl_context(ctx);
1135    struct virgl_resource *dres = virgl_resource(dst);
1136    struct virgl_resource *sres = virgl_resource(src);
1137 
1138    if (dres->u.b.target == PIPE_BUFFER)
1139       util_range_add(&dres->u.b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
1140    virgl_resource_dirty(dres, dst_level);
1141 
1142    virgl_encode_resource_copy_region(vctx, dres,
1143                                     dst_level, dstx, dsty, dstz,
1144                                     sres, src_level,
1145                                     src_box);
1146 }
1147 
1148 static void
virgl_flush_resource(struct pipe_context * pipe,struct pipe_resource * resource)1149 virgl_flush_resource(struct pipe_context *pipe,
1150                     struct pipe_resource *resource)
1151 {
1152 }
1153 
virgl_blit(struct pipe_context * ctx,const struct pipe_blit_info * blit)1154 static void virgl_blit(struct pipe_context *ctx,
1155                       const struct pipe_blit_info *blit)
1156 {
1157    struct virgl_context *vctx = virgl_context(ctx);
1158    struct virgl_resource *dres = virgl_resource(blit->dst.resource);
1159    struct virgl_resource *sres = virgl_resource(blit->src.resource);
1160 
1161    assert(ctx->screen->get_param(ctx->screen,
1162                                  PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
1163           (util_format_is_srgb(blit->dst.resource->format) ==
1164             util_format_is_srgb(blit->dst.format)));
1165 
1166    virgl_resource_dirty(dres, blit->dst.level);
1167    virgl_encode_blit(vctx, dres, sres,
1168                     blit);
1169 }
1170 
virgl_set_hw_atomic_buffers(struct pipe_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1171 static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
1172                                         unsigned start_slot,
1173                                         unsigned count,
1174                                         const struct pipe_shader_buffer *buffers)
1175 {
1176    struct virgl_context *vctx = virgl_context(ctx);
1177 
1178    vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1179    for (unsigned i = 0; i < count; i++) {
1180       unsigned idx = start_slot + i;
1181       if (buffers && buffers[i].buffer) {
1182          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1183          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1184 
1185          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
1186                                  buffers[i].buffer);
1187          vctx->atomic_buffers[idx] = buffers[i];
1188          vctx->atomic_buffer_enabled_mask |= 1 << idx;
1189       } else {
1190          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
1191       }
1192    }
1193 
1194    virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
1195 }
1196 
virgl_set_shader_buffers(struct pipe_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers,unsigned writable_bitmask)1197 static void virgl_set_shader_buffers(struct pipe_context *ctx,
1198                                      enum pipe_shader_type shader,
1199                                      unsigned start_slot, unsigned count,
1200                                      const struct pipe_shader_buffer *buffers,
1201                                      unsigned writable_bitmask)
1202 {
1203    struct virgl_context *vctx = virgl_context(ctx);
1204    struct virgl_screen *rs = virgl_screen(ctx->screen);
1205    struct virgl_shader_binding_state *binding =
1206       &vctx->shader_bindings[shader];
1207 
1208    binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1209    for (unsigned i = 0; i < count; i++) {
1210       unsigned idx = start_slot + i;
1211       if (buffers && buffers[i].buffer) {
1212          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1213          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1214 
1215          pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
1216          binding->ssbos[idx] = buffers[i];
1217          binding->ssbo_enabled_mask |= 1 << idx;
1218       } else {
1219          pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
1220       }
1221    }
1222 
1223    uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
1224       rs->caps.caps.v2.max_shader_buffer_frag_compute :
1225       rs->caps.caps.v2.max_shader_buffer_other_stages;
1226    if (!max_shader_buffer)
1227       return;
1228    virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
1229 }
1230 
virgl_create_fence_fd(struct pipe_context * ctx,struct pipe_fence_handle ** fence,int fd,enum pipe_fd_type type)1231 static void virgl_create_fence_fd(struct pipe_context *ctx,
1232                                   struct pipe_fence_handle **fence,
1233                                   int fd,
1234                                   enum pipe_fd_type type)
1235 {
1236    assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
1237    struct virgl_screen *rs = virgl_screen(ctx->screen);
1238 
1239    if (rs->vws->cs_create_fence)
1240       *fence = rs->vws->cs_create_fence(rs->vws, fd);
1241 }
1242 
virgl_fence_server_sync(struct pipe_context * ctx,struct pipe_fence_handle * fence)1243 static void virgl_fence_server_sync(struct pipe_context *ctx,
1244 			            struct pipe_fence_handle *fence)
1245 {
1246    struct virgl_context *vctx = virgl_context(ctx);
1247    struct virgl_screen *rs = virgl_screen(ctx->screen);
1248 
1249    if (rs->vws->fence_server_sync)
1250       rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
1251 }
1252 
static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

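/* Forward glMemoryBarrier()-style barriers to the host; a no-op when the
 * host does not report VIRGL_CAP_MEMORY_BARRIER.
 */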
static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

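/* Compute shaders have no stream output, so an empty so_info is encoded.
 * The CSO handed back to the state tracker is just the host-side object
 * handle smuggled through a pointer.
 */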
static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

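/* Binding just tells the host which shader handle to use for compute. */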
static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

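/* The first grid launched since the last submission re-emits the compute
 * resource bindings; num_compute is presumably reset whenever the command
 * buffer is flushed, so those bindings need to be encoded again.
 */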
static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

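/* Drop every reference (sampler views, UBOs, SSBOs, images) still held by
 * one shader stage's binding state, clearing the enabled masks as we go.
 */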
static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

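/* Tear the context down in roughly the reverse order of creation: destroy
 * the host sub-context, flush the remaining commands, then release all
 * references and per-context allocations.
 */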
static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

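/* Report the sub-pixel position of a given MSAA sample, decoded from the
 * packed sample-location bytes the host advertises in its v2 caps.
 */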
static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from the i965 driver's
    * gen6_get_sample_position().  The only addition is that we hold the
    * MSAA positions for all sample counts in a flat array. */
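   /* Each byte packs one sample location as 4.4 fixed point: the x offset
    * lives in the high nibble and the y offset in the low nibble, both in
    * 1/16ths of a pixel.  For example, a byte of 0x88 decodes to the pixel
    * center (0.5, 0.5). */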
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

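/* Forward the driconf tweaks this screen was configured with to the host
 * renderer, which applies the matching workarounds on its side.
 */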
static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

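/* Create a guest-side context: allocate the command buffer, fill in the
 * pipe_context vtable, set up the transfer/upload machinery, and create
 * the matching sub-context on the host.
 */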
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

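   /* Hook up the pipe_context entry points; anything not set here comes
    * from the shared resource/query/stream-output helpers below. */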
   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.clear_texture = virgl_clear_texture;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                       (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

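   /* Each pipe_context gets its own sub-context within the screen's single
    * host virgl context; subsequent commands are routed to it. */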
   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}