/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "compiler/nir/nir.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "nir/nir_to_tgsi.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

static uint32_t next_handle;
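/* Editor's note (a sketch of the handle scheme, inferred from this file):
 * object handles are allocated guest-side from this single atomic counter
 * and shared across all object types.  p_atomic_inc_return() yields the
 * incremented value, so the first handle is 1 and 0 is never produced;
 * the bind paths below appear to rely on that, using handle 0 to mean
 * "no object".  Hypothetical usage:
 *
 *    uint32_t blend_handle = virgl_object_assign_handle();
 *    uint32_t dsa_handle   = virgl_object_assign_handle();
 */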
uint32_t virgl_object_assign_handle(void)
{
   return p_atomic_inc_return(&next_handle);
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER |
                                           PIPE_BIND_SHADER_BUFFER |
                                           PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

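/* Editor's note: the virgl_attach_res_* helpers below walk the currently
 * bound state and re-emit each backing resource into the command buffer
 * via vws->emit_res(), presumably to keep the winsys' resource list for
 * the next submission in sync with what the host may touch.  They are
 * driven by virgl_reemit_draw_resources() and
 * virgl_reemit_compute_resources() after each flush.
 */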
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
      if (binding->views[i] && binding->views[i]->texture) {
         struct virgl_resource *res = virgl_resource(binding->views[i]->texture);
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

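/* Editor's note: surface creation is a thin wrapper - allocate the
 * guest-side struct, fill in the pipe_surface template fields, and encode
 * a VIRGL_OBJECT_SURFACE create referencing the resource's host handle.
 * Buffer surfaces are rejected up front, which is what allows
 * virgl_can_rebind_resource() above to ignore VIRGL_OBJECT_SURFACE binds.
 */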
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   surf->base.nr_samples = templ->nr_samples;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

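/* Editor's note: most CSOs here (blend, DSA, sampler, shader) have no
 * guest-side storage beyond their handle, so the 32-bit host handle is
 * smuggled through the opaque void* that gallium expects, e.g.
 *
 *    void *cso = (void *)(unsigned long)handle;   // create
 *    uint32_t h = (unsigned long)cso;             // bind/delete
 *
 * Rasterizer state is the exception: the full pipe_rasterizer_state is
 * retained so util_primconvert can be handed it at draw time.
 */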
static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   assert(rs_state->depth_clip_near ||
          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     unsigned unbind_num_trailing_slots,
                                     bool take_ownership,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers,
                                 unbind_num_trailing_slots,
                                 take_ownership);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

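/* Editor's note: vertex buffers are emitted lazily - set_vertex_buffers()
 * above only records state and raises vertex_array_dirty, and the actual
 * encoding happens here just before a draw.  When the bound vertex-elements
 * state carries a binding map (the instance_divisor workaround), the
 * buffers are reshuffled through that map so elements and bindings stay
 * 1:1 on the host.
 */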
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, &ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      bool take_ownership,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      if (take_ownership) {
         pipe_resource_reference(&binding->ubos[index].buffer, NULL);
         binding->ubos[index].buffer = buf->buffer;
      } else {
         pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      }
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

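/* Editor's note: shared shader-creation path for all stages.  NIR shaders
 * are first lowered to TGSI (the representation the virgl protocol
 * carries), then virgl_tgsi_transform() applies driver-specific fixups
 * before the tokens are encoded as a VIRGL_OBJECT_SHADER.  Both token
 * buffers are transient and freed once encoded; only the handle survives.
 */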
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *tokens;
   const struct tgsi_token *ntt_tokens = NULL;
   struct tgsi_token *new_tokens;
   int ret;
   bool is_separable = false;

   if (shader->type == PIPE_SHADER_IR_NIR) {
      struct nir_to_tgsi_options options = {
         .unoptimized_ra = true,
         .lower_fabs = true
      };
      nir_shader *s = nir_shader_clone(NULL, shader->ir.nir);

      /* Propagate the separable-shader property to the host, unless it is
       * an internal shader - these are marked separable even though they are not. */
      is_separable = s->info.separate_shader && !s->info.internal;
      ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
   } else {
      tokens = shader->tokens;
   }

   new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, tokens, is_separable);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state for the requested stage */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);
   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}


static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const struct pipe_scissor_state *scissor_state,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vres = virgl_resource(res);

   virgl_encode_clear_texture(vctx, vres, level, box, data);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}

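/* Editor's note on the draw path, in order: multi-draws are split into
 * single draws (util_draw_multi), trivially empty draws are dropped,
 * primitive types the host does not advertise in prim_mask fall back to
 * util_primconvert, user index buffers are uploaded through u_upload_mgr,
 * and bound resources are re-emitted on the first draw after a flush
 * before the draw itself is encoded.
 */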
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draws,
                           unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(ctx, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !dinfo->instance_count))
      return;

   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&draws[0].count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = draws[0].start * ib.index_size;

      if (ib.user_buffer) {
         unsigned start_offset = draws[0].start * ib.index_size;
         u_upload_data(vctx->uploader, start_offset,
                       draws[0].count * ib.index_size, 4,
                       (char*)ib.user_buffer + start_offset,
                       &ib.offset, &ib.buffer);
         ib.offset -= start_offset;
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info, drawid_offset, indirect, &draws[0]);

   pipe_resource_reference(&ib.buffer, NULL);
}

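/* Editor's note: with the VIRGL_DEBUG_SYNC flag set, every submission is
 * fenced and waited on immediately, turning the normally pipelined
 * protocol into a synchronous one - useful for pinpointing which command
 * buffer triggers a host-side problem, at a large performance cost.
 */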
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

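/* Editor's note: this flushes the encoded command stream to the host.
 * Empty command buffers are skipped unless a fence was requested; after
 * submission the sub-context is re-selected and, when transfers are
 * encoded into the cbuf, VIRGL_MAX_TBUF_DWORDS is reserved - presumably so
 * a transfer can always be queued without recursing into another flush.
 */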
void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                    struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    unsigned unbind_num_trailing_slots,
                                    bool take_ownership,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         if (take_ownership) {
            pipe_sampler_view_reference(&binding->views[idx], NULL);
            binding->views[idx] = views[i];
         } else {
            pipe_sampler_view_reference(&binding->views[idx], views[i]);
         }
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);

   if (unbind_num_trailing_slots) {
      virgl_set_sampler_views(ctx, shader_type, start_slot + num_views,
                              unbind_num_trailing_slots, 0, false, NULL);
   }
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
       !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->patch_vertices = patch_vertices;
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->b.target == PIPE_BUFFER)
      util_range_add(&dres->b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

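/* Editor's note: the *_enabled_mask bookkeeping below mirrors which slots
 * the host has bound, so virgl_rebind_resource() and the attach helpers
 * can walk only live slots with u_bit_scan().  A typical update clears the
 * affected range first, then re-marks occupied slots:
 *
 *    mask &= ~u_bit_consecutive(start_slot, count);
 *    ...
 *    mask |= 1 << idx;   // for each slot that actually has a buffer
 */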
virgl_set_hw_atomic_buffers(struct pipe_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1234 static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
1235                                         unsigned start_slot,
1236                                         unsigned count,
1237                                         const struct pipe_shader_buffer *buffers)
1238 {
1239    struct virgl_context *vctx = virgl_context(ctx);
1240 
1241    vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1242    for (unsigned i = 0; i < count; i++) {
1243       unsigned idx = start_slot + i;
1244       if (buffers && buffers[i].buffer) {
1245          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1246          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1247 
1248          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
1249                                  buffers[i].buffer);
1250          vctx->atomic_buffers[idx] = buffers[i];
1251          vctx->atomic_buffer_enabled_mask |= 1 << idx;
1252       } else {
1253          pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
1254       }
1255    }
1256 
1257    virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
1258 }
1259 
static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

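/* Import a native sync file descriptor as a virgl fence via the winsys, when
 * supported. Only PIPE_FD_TYPE_NATIVE_SYNC is expected here.
 */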
static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

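/* Insert a server-side wait on a fence into this context's command stream,
 * when the winsys supports it.
 */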
static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

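/* Bind shader images for one stage, following the same pattern as the SSBO
 * path above. Trailing slots to unbind are handled by a recursive call with a
 * NULL image array.
 */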
static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    unsigned unbind_num_trailing_slots,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);

   if (unbind_num_trailing_slots) {
      virgl_set_shader_images(ctx, shader, start_slot + count,
                              unbind_num_trailing_slots, 0, NULL);
   }
}

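/* Encode a memory barrier if the host advertises support; otherwise the
 * request is silently dropped.
 */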
static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

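/* Compile a compute shader. NIR is first translated to TGSI (the host
 * consumes TGSI), the tokens are run through the virgl-specific TGSI
 * transform, and the result is encoded to the host under a fresh handle.
 * The returned "CSO" is just that handle stuffed into a pointer.
 */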
static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *ntt_tokens = NULL;
   const struct tgsi_token *tokens;
   struct pipe_stream_output_info so_info = {};
   int ret;

   if (state->ir_type == PIPE_SHADER_IR_NIR) {
      struct nir_to_tgsi_options options = {
         .unoptimized_ra = true,
         .lower_fabs = true
      };
      nir_shader *s = nir_shader_clone(NULL, state->prog);
      ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
   } else {
      tokens = state->prog;
   }

   void *new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, tokens, false);
   if (!new_tokens) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      FREE(new_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);
   FREE(new_tokens);

   return (void *)(unsigned long)handle;
}

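/* Compute state objects are opaque host handles, so binding and deleting them
 * only needs to forward the handle.
 */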
static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

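/* Re-emit compute-side resources on the first launch after the compute
 * counter was reset (this happens when the context is flushed), then encode
 * the grid launch itself.
 */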
static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

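/* Drop all guest-side references held by one stage's shader bindings:
 * sampler views, UBOs, SSBOs and images.
 */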
static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
      if (binding->views[i]) {
         pipe_sampler_view_reference(
                  (struct pipe_sampler_view **)&binding->views[i], NULL);
      }
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_emit_string_marker(struct pipe_context *ctx, const char *message, int len)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_emit_string_marker(vctx, message, len);
}

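/* Tear down the context: destroy the host sub-context, flush the remaining
 * commands, then release every guest-side reference before freeing the
 * per-context pools and the context itself.
 */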
static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965/gen6_get_sample_position.
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
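   /* Each packed position is one byte: x in the high nibble and y in the low
    * nibble, both in 1/16th-of-a-pixel units. */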
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

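/* Forward the screen's tweak settings to the host; only called when the host
 * advertises VIRGL_CAP_APP_TWEAK_SUPPORT.
 */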
static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

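/* Pass the full set of per-stage shader handles to the host so it can link
 * them as one program; unused stages encode as handle 0.
 */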
static void virgl_link_shader(struct pipe_context *ctx, void **handles)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t shader_handles[PIPE_SHADER_TYPES];
   for (uint32_t i = 0; i < PIPE_SHADER_TYPES; ++i)
      shader_handles[i] = (uintptr_t)handles[i];
   virgl_encode_link_shader(vctx, shader_handles);
}

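/* Create a virgl context: allocate the command buffer, fill in the
 * pipe_context vtable, set up the transfer/upload machinery, and create the
 * host-side sub-context this context will encode into.
 */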
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   vctx = CALLOC_STRUCT(virgl_context);
   const char *host_debug_flagstring;

   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.set_patch_vertices = virgl_set_patch_vertices;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.clear_texture = virgl_clear_texture;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;
   vctx->base.emit_string_marker = virgl_emit_string_marker;

   if (rs->caps.caps.v2.host_feature_check_version >= 7)
      vctx->base.link_shader = virgl_link_shader;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

   vctx->hw_sub_ctx_id = p_atomic_inc_return(&rs->sub_ctx_id);
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}