/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "compiler/nir/nir.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "nir/nir_to_tgsi.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

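/* Object handles are small integers that guest and host use to name the same
 * protocol object.  A single process-wide atomic counter keeps them unique;
 * because the counter is pre-incremented, the first handle is 1, leaving 0
 * free to mean "no object" when binding (see virgl_bind_rasterizer_state()).
 */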
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return p_atomic_inc_return(&next_handle);
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER |
                                           PIPE_BIND_SHADER_BUFFER |
                                           PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

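/* The virgl_attach_res_* helpers re-emit currently bound resources into the
 * command buffer.  Emitting a resource keeps it referenced on the host for
 * the lifetime of the command buffer, so this has to be redone after every
 * flush for everything that is still bound (see
 * virgl_reemit_draw_resources() below).
 */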
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * After flushing, the hw context still has a bunch of resources bound,
 * so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

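/* Surfaces are host-side objects: the guest merely allocates a handle and
 * encodes a create command, then fills in the pipe_surface bookkeeping that
 * the state tracker sees.
 */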
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   surf->base.nr_samples = templ->nr_samples;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   assert(rs_state->depth_clip_near ||
          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

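   /* After the remap, element i reads from binding i and binding_map[i]
    * records which application vertex buffer feeds it.  For example, two
    * instanced elements that both sourced vertex buffer 0 end up on bindings
    * 0 and 1 with binding_map = {0, 0}; virgl_hw_set_vertex_buffers() then
    * replicates buffer 0 into both slots at draw time.
    */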
   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     unsigned unbind_num_trailing_slots,
                                     bool take_ownership,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers,
                                 unbind_num_trailing_slots,
                                 take_ownership);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else {
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
      }

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, &ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

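/* Constant buffers backed by a real resource are bound by handle; user
 * (CPU-side) constant buffers are instead written inline into the command
 * stream, so there is no resource to reference in that case.
 */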
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      bool take_ownership,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      if (take_ownership) {
         pipe_resource_reference(&binding->ubos[index].buffer, NULL);
         binding->ubos[index].buffer = buf->buffer;
      } else {
         pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      }
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

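/* Common path for all shader stages: NIR is translated to TGSI (the wire
 * format virglrenderer consumes), the virgl-specific TGSI transform is
 * applied, and the result is encoded under a fresh handle.
 */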
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *tokens;
   const struct tgsi_token *ntt_tokens = NULL;
   struct tgsi_token *new_tokens;
   int ret;

   if (shader->type == PIPE_SHADER_IR_NIR) {
      nir_shader *s = nir_shader_clone(NULL, shader->ir.nir);
      ntt_tokens = tokens = nir_to_tgsi(s, vctx->base.screen); /* takes ownership */
   } else {
      tokens = shader->tokens;
   }

   new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, tokens);
   if (!new_tokens) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      FREE(new_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);
   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}
static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const struct pipe_scissor_state *scissor_state,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vres = virgl_resource(res);

   virgl_encode_clear_texture(vctx, vres, level, box, data);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}

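/* Multi-draws are split via util_draw_multi(), primitive types the host
 * cannot rasterize are lowered through u_primconvert, and user index
 * buffers are uploaded to a GPU buffer before the draw itself is encoded.
 */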
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draws,
                           unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(ctx, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !dinfo->instance_count))
      return;

   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&draws[0].count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = draws[0].start * ib.index_size;

      if (ib.user_buffer) {
         unsigned start_offset = draws[0].start * ib.index_size;
         u_upload_data(vctx->uploader, start_offset,
                       draws[0].count * ib.index_size, 4,
                       (char*)ib.user_buffer + start_offset,
                       &ib.offset, &ib.buffer);
         ib.offset -= start_offset;
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info, drawid_offset, indirect, &draws[0]);

   pipe_resource_reference(&ib.buffer, NULL);
}

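/* With VIRGL_DEBUG_SYNC set, every submission is fenced and waited upon
 * immediately.  This serializes guest and host execution, which makes it
 * easier to attribute host-side errors to the submission that caused them.
 */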
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

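/* Flush the context's command buffer to the host.  Pending transfers are
 * folded into the stream first; afterwards the buffer is re-primed with the
 * sub-context selector and, when transfers are encoded inline, headroom for
 * them (VIRGL_MAX_TBUF_DWORDS).
 */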
void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                    struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

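/* Unbinding of the trailing slots is done with a recursive tail call that
 * passes NULL views, so each recursion level only has to handle the
 * contiguous range it was given.
 */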
static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    unsigned unbind_num_trailing_slots,
                                    bool take_ownership,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         if (take_ownership) {
            pipe_sampler_view_reference(&binding->views[idx], NULL);
            binding->views[idx] = views[i];
         } else {
            pipe_sampler_view_reference(&binding->views[idx], views[i]);
         }
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);

   if (unbind_num_trailing_slots) {
      virgl_set_sampler_views(ctx, shader_type, start_slot + num_views,
                              unbind_num_trailing_slots, 0, false, NULL);
   }
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
       !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->patch_vertices = patch_vertices;
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->b.target == PIPE_BUFFER)
      util_range_add(&dres->b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

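/* The remaining binding-point setters share one pattern: clear the enabled
 * mask for the slots being written, take a reference on each new buffer (and
 * record it in bind_history so it can be rebound after a resource is
 * reallocated), then encode the whole range to the host.
 */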
virgl_set_hw_atomic_buffers(struct pipe_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1231 static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
1232                                         unsigned start_slot,
1233                                         unsigned count,
1234                                         const struct pipe_shader_buffer *buffers)
1235 {
1236    struct virgl_context *vctx = virgl_context(ctx);
1237 
1238    vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1239    for (unsigned i = 0; i < count; i++) {
1240       unsigned idx = start_slot + i;
1241       if (buffers && buffers[i].buffer) {
1242          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1243          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1244 
1245          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
1246                                  buffers[i].buffer);
1247          vctx->atomic_buffers[idx] = buffers[i];
1248          vctx->atomic_buffer_enabled_mask |= 1 << idx;
1249       } else {
1250          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
1251       }
1252    }
1253 
1254    virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
1255 }
1256 
static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   /* Don't encode the bindings when the host exposes no SSBO slots for this
    * stage. */
   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

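/* Import a native sync fd as a pipe fence, when the winsys supports it. */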
static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

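/* Bind a range of shader images for one stage. Trailing slots requested via
 * unbind_num_trailing_slots are cleared by the tail call at the end of the
 * function. */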
static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    unsigned unbind_num_trailing_slots,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   /* Don't encode the bindings when the host exposes no image slots for this
    * stage. */
   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);

   if (unbind_num_trailing_slots) {
      virgl_set_shader_images(ctx, shader, start_slot + count,
                              unbind_num_trailing_slots, 0, NULL);
   }
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

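/* Create a compute shader CSO. The virgl protocol consumes TGSI, so NIR
 * shaders are lowered with nir_to_tgsi() first; the CSO handed back to the
 * state tracker is just the host object handle cast to a pointer. */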
static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *ntt_tokens = NULL;
   const struct tgsi_token *tokens;
   struct pipe_stream_output_info so_info = {};
   int ret;

   if (state->ir_type == PIPE_SHADER_IR_NIR) {
      nir_shader *s = nir_shader_clone(NULL, state->prog);
      ntt_tokens = tokens = nir_to_tgsi(s, vctx->base.screen); /* takes ownership */
   } else {
      tokens = state->prog;
   }

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);

   return (void *)(unsigned long)handle;
}

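/* Bind/delete only need to recover the host handle smuggled through the
 * void pointer and encode the corresponding command. */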
static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

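/* Drop every reference held by one stage's binding state. u_bit_scan()
 * consumes the enabled masks, clearing them as a side effect. */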
static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_emit_string_marker(struct pipe_context *ctx, const char *message, int len)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_emit_string_marker(vctx, message, len);
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965 gen6_get_sample_position.
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
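   /* Worked example: for 4x MSAA and index == 1, the packed byte is
    * sample_locations[1] >> 8; its upper nibble is the x offset and its
    * lower nibble the y offset, each in 1/16ths of a pixel. */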
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

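/* Forward any configured GLES tweaks to the host renderer ("brga" is the
 * spelling the protocol enums actually use). */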
static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.set_patch_vertices = virgl_set_patch_vertices;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.clear_texture = virgl_clear_texture;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;
   vctx->base.emit_string_marker = virgl_emit_string_marker;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

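   /* Each pipe_context gets its own host sub-context; create it and make it
    * current for everything this context encodes. */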
   vctx->hw_sub_ctx_id = p_atomic_inc_return(&rs->sub_ctx_id);
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}