1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <string.h>
25 #ifndef _WIN32
26 #include <libsync.h>
27 #endif
28
29 #include "pipe/p_shader_tokens.h"
30
31 #include "compiler/nir/nir.h"
32 #include "pipe/p_context.h"
33 #include "pipe/p_defines.h"
34 #include "pipe/p_screen.h"
35 #include "pipe/p_state.h"
36 #include "nir/nir_to_tgsi.h"
37 #include "util/format/u_format.h"
38 #include "indices/u_primconvert.h"
39 #include "util/u_draw.h"
40 #include "util/u_inlines.h"
41 #include "util/u_memory.h"
42 #include "util/u_prim.h"
43 #include "util/u_surface.h"
44 #include "util/u_transfer.h"
45 #include "util/u_helpers.h"
46 #include "util/slab.h"
47 #include "util/u_upload_mgr.h"
48 #include "util/u_blitter.h"
49
50 #include "virgl_encode.h"
51 #include "virgl_context.h"
52 #include "virtio-gpu/virgl_protocol.h"
53 #include "virgl_resource.h"
54 #include "virgl_screen.h"
55 #include "virgl_staging_mgr.h"
56 #include "virgl_video.h"
57
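/* Object handles sent to the host are allocated from a global atomic
 * counter, so every handle is unique and non-zero.
 */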
58 static uint32_t next_handle;
59 uint32_t virgl_object_assign_handle(void)
60 {
61 return p_atomic_inc_return(&next_handle);
62 }
63
64 bool
65 virgl_can_rebind_resource(struct virgl_context *vctx,
66 struct pipe_resource *res)
67 {
68 /* We cannot rebind resources that are referenced by host objects, which
69 * are
70 *
71 * - VIRGL_OBJECT_SURFACE
72 * - VIRGL_OBJECT_SAMPLER_VIEW
73 * - VIRGL_OBJECT_STREAMOUT_TARGET
74 *
75 * Because surfaces cannot be created from buffers, we require the resource
76 * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
77 */
78 const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
79 PIPE_BIND_STREAM_OUTPUT);
80 const unsigned bind_history = virgl_resource(res)->bind_history;
81 return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
82 }
83
84 void
85 virgl_rebind_resource(struct virgl_context *vctx,
86 struct pipe_resource *res)
87 {
88 /* Queries use internally created buffers and do not go through transfers.
89 * Index buffers are not bindable. They are not tracked.
90 */
91 ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
92 PIPE_BIND_CONSTANT_BUFFER |
93 PIPE_BIND_SHADER_BUFFER |
94 PIPE_BIND_SHADER_IMAGE);
95 const unsigned bind_history = virgl_resource(res)->bind_history;
96 unsigned i;
97
98 assert(virgl_can_rebind_resource(vctx, res) &&
99 (bind_history & tracked_bind) == bind_history);
100
101 if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
102 for (i = 0; i < vctx->num_vertex_buffers; i++) {
103 if (vctx->vertex_buffer[i].buffer.resource == res) {
104 vctx->vertex_array_dirty = true;
105 break;
106 }
107 }
108 }
109
110 if (bind_history & PIPE_BIND_SHADER_BUFFER) {
111 uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
112 while (remaining_mask) {
113 int i = u_bit_scan(&remaining_mask);
114 if (vctx->atomic_buffers[i].buffer == res) {
115 const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
116 virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
117 }
118 }
119 }
120
121 /* check per-stage shader bindings */
122 if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
123 PIPE_BIND_SHADER_BUFFER |
124 PIPE_BIND_SHADER_IMAGE)) {
125 enum pipe_shader_type shader_type;
126 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
127 const struct virgl_shader_binding_state *binding =
128 &vctx->shader_bindings[shader_type];
129
130 if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
131 uint32_t remaining_mask = binding->ubo_enabled_mask;
132 while (remaining_mask) {
133 int i = u_bit_scan(&remaining_mask);
134 if (binding->ubos[i].buffer == res) {
135 const struct pipe_constant_buffer *ubo = &binding->ubos[i];
136 virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
137 ubo->buffer_offset,
138 ubo->buffer_size,
139 virgl_resource(res));
140 }
141 }
142 }
143
144 if (bind_history & PIPE_BIND_SHADER_BUFFER) {
145 uint32_t remaining_mask = binding->ssbo_enabled_mask;
146 while (remaining_mask) {
147 int i = u_bit_scan(&remaining_mask);
148 if (binding->ssbos[i].buffer == res) {
149 const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
150 virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
151 ssbo);
152 }
153 }
154 }
155
156 if (bind_history & PIPE_BIND_SHADER_IMAGE) {
157 uint32_t remaining_mask = binding->image_enabled_mask;
158 while (remaining_mask) {
159 int i = u_bit_scan(&remaining_mask);
160 if (binding->images[i].resource == res) {
161 const struct pipe_image_view *image = &binding->images[i];
162 virgl_encode_set_shader_images(vctx, shader_type, i, 1,
163 image);
164 }
165 }
166 }
167 }
168 }
169 }
170
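/* The virgl_attach_res_* helpers below add the resources referenced by the
 * current state to the command buffer so the host keeps them alive for the
 * submission; framebuffer attachments are additionally marked dirty on the
 * guest side.
 */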
171 static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
172 {
173 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
174 struct pipe_surface *surf;
175 struct virgl_resource *res;
176 unsigned i;
177
178 surf = vctx->framebuffer.zsbuf;
179 if (surf) {
180 res = virgl_resource(surf->texture);
181 if (res) {
182 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
183 virgl_resource_dirty(res, surf->u.tex.level);
184 }
185 }
186 for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
187 surf = vctx->framebuffer.cbufs[i];
188 if (surf) {
189 res = virgl_resource(surf->texture);
190 if (res) {
191 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
192 virgl_resource_dirty(res, surf->u.tex.level);
193 }
194 }
195 }
196 }
197
198 static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
199 enum pipe_shader_type shader_type)
200 {
201 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
202 const struct virgl_shader_binding_state *binding =
203 &vctx->shader_bindings[shader_type];
204
205 for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
206 if (binding->views[i] && binding->views[i]->texture) {
207 struct virgl_resource *res = virgl_resource(binding->views[i]->texture);
208 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
209 }
210 }
211 }
212
213 static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
214 {
215 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
216 struct virgl_resource *res;
217 unsigned i;
218
219 for (i = 0; i < vctx->num_vertex_buffers; i++) {
220 res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
221 if (res)
222 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
223 }
224 }
225
226 static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
227 struct virgl_indexbuf *ib)
228 {
229 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
230 struct virgl_resource *res;
231
232 res = virgl_resource(ib->buffer);
233 if (res)
234 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
235 }
236
237 static void virgl_attach_res_so_targets(struct virgl_context *vctx)
238 {
239 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
240 struct virgl_resource *res;
241 unsigned i;
242
243 for (i = 0; i < vctx->num_so_targets; i++) {
244 res = virgl_resource(vctx->so_targets[i].base.buffer);
245 if (res)
246 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
247 }
248 }
249
250 static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
251 enum pipe_shader_type shader_type)
252 {
253 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
254 const struct virgl_shader_binding_state *binding =
255 &vctx->shader_bindings[shader_type];
256 uint32_t remaining_mask = binding->ubo_enabled_mask;
257 struct virgl_resource *res;
258
259 while (remaining_mask) {
260 int i = u_bit_scan(&remaining_mask);
261 res = virgl_resource(binding->ubos[i].buffer);
262 assert(res);
263 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
264 }
265 }
266
267 static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
268 enum pipe_shader_type shader_type)
269 {
270 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
271 const struct virgl_shader_binding_state *binding =
272 &vctx->shader_bindings[shader_type];
273 uint32_t remaining_mask = binding->ssbo_enabled_mask;
274 struct virgl_resource *res;
275
276 while (remaining_mask) {
277 int i = u_bit_scan(&remaining_mask);
278 res = virgl_resource(binding->ssbos[i].buffer);
279 assert(res);
280 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
281 }
282 }
283
284 static void virgl_attach_res_shader_images(struct virgl_context *vctx,
285 enum pipe_shader_type shader_type)
286 {
287 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
288 const struct virgl_shader_binding_state *binding =
289 &vctx->shader_bindings[shader_type];
290 uint32_t remaining_mask = binding->image_enabled_mask;
291 struct virgl_resource *res;
292
293 while (remaining_mask) {
294 int i = u_bit_scan(&remaining_mask);
295 res = virgl_resource(binding->images[i].resource);
296 assert(res);
297 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
298 }
299 }
300
301 static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
302 {
303 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
304 uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
305 struct virgl_resource *res;
306
307 while (remaining_mask) {
308 int i = u_bit_scan(&remaining_mask);
309 res = virgl_resource(vctx->atomic_buffers[i].buffer);
310 assert(res);
311 vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
312 }
313 }
314
315 /*
316  * After a flush, the host context still has these resources bound, but the
317  * new command buffer no longer references them, so re-attach them here.
318 */
319 static void virgl_reemit_draw_resources(struct virgl_context *vctx)
320 {
321 enum pipe_shader_type shader_type;
322
323 /* reattach any flushed resources */
324 /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
325 virgl_attach_res_framebuffer(vctx);
326
327 for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
328 virgl_attach_res_sampler_views(vctx, shader_type);
329 virgl_attach_res_uniform_buffers(vctx, shader_type);
330 virgl_attach_res_shader_buffers(vctx, shader_type);
331 virgl_attach_res_shader_images(vctx, shader_type);
332 }
333 virgl_attach_res_atomic_buffers(vctx);
334 virgl_attach_res_vertex_buffers(vctx);
335 virgl_attach_res_so_targets(vctx);
336 }
337
338 static void virgl_reemit_compute_resources(struct virgl_context *vctx)
339 {
340 virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
341 virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
342 virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
343 virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);
344
345 virgl_attach_res_atomic_buffers(vctx);
346 }
347
348 static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
349 struct pipe_resource *resource,
350 const struct pipe_surface *templ)
351 {
352 struct virgl_context *vctx = virgl_context(ctx);
353 struct virgl_surface *surf;
354 struct virgl_resource *res = virgl_resource(resource);
355 uint32_t handle;
356
357 /* no support for buffer surfaces */
358 if (resource->target == PIPE_BUFFER)
359 return NULL;
360
361 surf = CALLOC_STRUCT(virgl_surface);
362 if (!surf)
363 return NULL;
364
365 assert(ctx->screen->caps.dest_surface_srgb_control ||
366 (util_format_is_srgb(templ->format) ==
367 util_format_is_srgb(resource->format)));
368
369 virgl_resource_dirty(res, 0);
370 handle = virgl_object_assign_handle();
371 pipe_reference_init(&surf->base.reference, 1);
372 pipe_resource_reference(&surf->base.texture, resource);
373 surf->base.context = ctx;
374 surf->base.format = templ->format;
375
376 surf->base.width = u_minify(resource->width0, templ->u.tex.level);
377 surf->base.height = u_minify(resource->height0, templ->u.tex.level);
378 surf->base.u.tex.level = templ->u.tex.level;
379 surf->base.u.tex.first_layer = templ->u.tex.first_layer;
380 surf->base.u.tex.last_layer = templ->u.tex.last_layer;
381 surf->base.nr_samples = templ->nr_samples;
382
383 virgl_encoder_create_surface(vctx, handle, res, &surf->base);
384 surf->handle = handle;
385 return &surf->base;
386 }
387
388 static void virgl_surface_destroy(struct pipe_context *ctx,
389 struct pipe_surface *psurf)
390 {
391 struct virgl_context *vctx = virgl_context(ctx);
392 struct virgl_surface *surf = virgl_surface(psurf);
393
394 pipe_resource_reference(&surf->base.texture, NULL);
395 virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
396 FREE(surf);
397 }
398
399 static void *virgl_create_blend_state(struct pipe_context *ctx,
400 const struct pipe_blend_state *blend_state)
401 {
402 struct virgl_context *vctx = virgl_context(ctx);
403 uint32_t handle;
404 handle = virgl_object_assign_handle();
405
406 virgl_encode_blend_state(vctx, handle, blend_state);
407 return (void *)(unsigned long)handle;
408
409 }
410
411 static void virgl_bind_blend_state(struct pipe_context *ctx,
412 void *blend_state)
413 {
414 struct virgl_context *vctx = virgl_context(ctx);
415 uint32_t handle = (unsigned long)blend_state;
416 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
417 }
418
419 static void virgl_delete_blend_state(struct pipe_context *ctx,
420 void *blend_state)
421 {
422 struct virgl_context *vctx = virgl_context(ctx);
423 uint32_t handle = (unsigned long)blend_state;
424 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
425 }
426
427 static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
428 const struct pipe_depth_stencil_alpha_state *blend_state)
429 {
430 struct virgl_context *vctx = virgl_context(ctx);
431 uint32_t handle;
432 handle = virgl_object_assign_handle();
433
434 virgl_encode_dsa_state(vctx, handle, blend_state);
435 return (void *)(unsigned long)handle;
436 }
437
438 static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
439 void *blend_state)
440 {
441 struct virgl_context *vctx = virgl_context(ctx);
442 uint32_t handle = (unsigned long)blend_state;
443 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
444 }
445
446 static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
447 void *dsa_state)
448 {
449 struct virgl_context *vctx = virgl_context(ctx);
450 uint32_t handle = (unsigned long)dsa_state;
451 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
452 }
453
454 static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
455 const struct pipe_rasterizer_state *rs_state)
456 {
457 struct virgl_context *vctx = virgl_context(ctx);
458 struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);
459
460 if (!vrs)
461 return NULL;
462 vrs->rs = *rs_state;
463 vrs->handle = virgl_object_assign_handle();
464
465 assert(rs_state->depth_clip_near ||
466 virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);
467
468 virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
469 return (void *)vrs;
470 }
471
472 static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
473 void *rs_state)
474 {
475 struct virgl_context *vctx = virgl_context(ctx);
476 uint32_t handle = 0;
477 if (rs_state) {
478 struct virgl_rasterizer_state *vrs = rs_state;
479 vctx->rs_state = *vrs;
480 handle = vrs->handle;
481 }
482 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
483 }
484
485 static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
486 void *rs_state)
487 {
488 struct virgl_context *vctx = virgl_context(ctx);
489 struct virgl_rasterizer_state *vrs = rs_state;
490 virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
491 FREE(vrs);
492 }
493
494 static void virgl_set_framebuffer_state(struct pipe_context *ctx,
495 const struct pipe_framebuffer_state *state)
496 {
497 struct virgl_context *vctx = virgl_context(ctx);
498
499 vctx->framebuffer = *state;
500 virgl_encoder_set_framebuffer_state(vctx, state);
501 virgl_attach_res_framebuffer(vctx);
502 }
503
504 static void virgl_set_viewport_states(struct pipe_context *ctx,
505 unsigned start_slot,
506 unsigned num_viewports,
507 const struct pipe_viewport_state *state)
508 {
509 struct virgl_context *vctx = virgl_context(ctx);
510 virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
511 }
512
513 static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
514 unsigned num_elements,
515 const struct pipe_vertex_element *elements)
516 {
517 struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
518 struct virgl_context *vctx = virgl_context(ctx);
519 struct virgl_vertex_elements_state *state =
520 CALLOC_STRUCT(virgl_vertex_elements_state);
521
522 for (int i = 0; i < num_elements; ++i) {
523 if (elements[i].instance_divisor) {
524 /* Virglrenderer doesn't deal with instance_divisor correctly if
525 * there isn't a 1:1 relationship between elements and bindings.
526 * So let's make sure there is, by duplicating bindings.
527 */
528 for (int j = 0; j < num_elements; ++j) {
529 new_elements[j] = elements[j];
530 new_elements[j].vertex_buffer_index = j;
531 state->binding_map[j] = elements[j].vertex_buffer_index;
532 }
533 elements = new_elements;
534 state->num_bindings = num_elements;
535 break;
536 }
537 }
538 for (int i = 0; i < num_elements; ++i)
539 state->strides[elements[i].vertex_buffer_index] = elements[i].src_stride;
540
541 state->handle = virgl_object_assign_handle();
542 virgl_encoder_create_vertex_elements(vctx, state->handle,
543 num_elements, elements);
544 return state;
545 }
546
547 static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
548 void *ve)
549 {
550 struct virgl_context *vctx = virgl_context(ctx);
551 struct virgl_vertex_elements_state *state =
552 (struct virgl_vertex_elements_state *)ve;
553 virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
554 FREE(state);
555 }
556
557 static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
558 void *ve)
559 {
560 struct virgl_context *vctx = virgl_context(ctx);
561 struct virgl_vertex_elements_state *state =
562 (struct virgl_vertex_elements_state *)ve;
563 vctx->vertex_elements = state;
564 virgl_encode_bind_object(vctx, state ? state->handle : 0,
565 VIRGL_OBJECT_VERTEX_ELEMENTS);
566 vctx->vertex_array_dirty = true;
567 }
568
569 static void virgl_set_vertex_buffers(struct pipe_context *ctx,
570 unsigned num_buffers,
571 const struct pipe_vertex_buffer *buffers)
572 {
573 struct virgl_context *vctx = virgl_context(ctx);
574
575 util_set_vertex_buffers_count(vctx->vertex_buffer,
576 &vctx->num_vertex_buffers,
577 buffers, num_buffers,
578 true);
579
580 if (buffers) {
581 for (unsigned i = 0; i < num_buffers; i++) {
582 struct virgl_resource *res =
583 virgl_resource(buffers[i].buffer.resource);
584 if (res && !buffers[i].is_user_buffer)
585 res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
586 }
587 }
588
589 vctx->vertex_array_dirty = true;
590 }
591
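/* Emit the current vertex-buffer bindings to the host. When instance divisors
 * forced a 1:1 element/binding duplication, the buffers are first remapped
 * through the vertex-element binding map.
 */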
592 static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
593 {
594 if (vctx->vertex_array_dirty) {
595 const struct virgl_vertex_elements_state *ve = vctx->vertex_elements;
596
597 if (ve && ve->num_bindings) {
598 struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
599 for (int i = 0; i < ve->num_bindings; ++i)
600 vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];
601
602 virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
603 } else
604 virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
605
606 virgl_attach_res_vertex_buffers(vctx);
607
608 vctx->vertex_array_dirty = false;
609 }
610 }
611
612 static void virgl_set_stencil_ref(struct pipe_context *ctx,
613 const struct pipe_stencil_ref ref)
614 {
615 struct virgl_context *vctx = virgl_context(ctx);
616 virgl_encoder_set_stencil_ref(vctx, &ref);
617 }
618
619 static void virgl_set_blend_color(struct pipe_context *ctx,
620 const struct pipe_blend_color *color)
621 {
622 struct virgl_context *vctx = virgl_context(ctx);
623 virgl_encoder_set_blend_color(vctx, color);
624 }
625
626 static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
627 struct virgl_indexbuf *ib)
628 {
629 virgl_encoder_set_index_buffer(vctx, ib);
630 virgl_attach_res_index_buffer(vctx, ib);
631 }
632
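/* Bind a constant-buffer slot: real buffers are referenced and encoded as
 * uniform buffers, while user constant data is written inline into the
 * command stream.
 */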
633 static void virgl_set_constant_buffer(struct pipe_context *ctx,
634 enum pipe_shader_type shader, uint index,
635 bool take_ownership,
636 const struct pipe_constant_buffer *buf)
637 {
638 struct virgl_context *vctx = virgl_context(ctx);
639 struct virgl_shader_binding_state *binding =
640 &vctx->shader_bindings[shader];
641
642 if (buf && buf->buffer) {
643 struct virgl_resource *res = virgl_resource(buf->buffer);
644 res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
645
646 virgl_encoder_set_uniform_buffer(vctx, shader, index,
647 buf->buffer_offset,
648 buf->buffer_size, res);
649
650 if (take_ownership) {
651 pipe_resource_reference(&binding->ubos[index].buffer, NULL);
652 binding->ubos[index].buffer = buf->buffer;
653 } else {
654 pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
655 }
656 binding->ubos[index] = *buf;
657 binding->ubo_enabled_mask |= 1 << index;
658 } else {
659 static const struct pipe_constant_buffer dummy_ubo;
660 if (!buf)
661 buf = &dummy_ubo;
662 virgl_encoder_write_constant_buffer(vctx, shader, index,
663 buf->buffer_size / 4,
664 buf->user_buffer);
665
666 pipe_resource_reference(&binding->ubos[index].buffer, NULL);
667 binding->ubo_enabled_mask &= ~(1 << index);
668 }
669 }
670
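/* Filter for nir_lower_tex on GLES hosts: request lowering of offsets on
 * array shadow sampling, except for plain textureGradOffset which the host
 * can use directly.
 */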
671 static bool
672 lower_gles_arrayshadow_offset_filter(const nir_instr *instr,
673 UNUSED const void *data)
674 {
675 if (instr->type != nir_instr_type_tex)
676 return false;
677
678 nir_tex_instr *tex = nir_instr_as_tex(instr);
679
680 if (!tex->is_shadow || !tex->is_array)
681 return false;
682
683 // textureGradOffset can be used directly
684 int grad_index = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
685 int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
686 if (grad_index >= 0 && proj_index < 0)
687 return false;
688
689 int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
690 if (offset_index >= 0)
691 return true;
692
693 return false;
694 }
695
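/* Common shader CSO creation path: translate NIR to TGSI when needed, run the
 * virgl TGSI transforms, encode the shader object on the host, and return its
 * handle as the CSO pointer.
 */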
696 static void *virgl_shader_encoder(struct pipe_context *ctx,
697 const struct pipe_shader_state *shader,
698 unsigned type)
699 {
700 struct virgl_context *vctx = virgl_context(ctx);
701 struct virgl_screen *rs = virgl_screen(ctx->screen);
702 uint32_t handle;
703 const struct tgsi_token *tokens;
704 const struct tgsi_token *ntt_tokens = NULL;
705 struct tgsi_token *new_tokens;
706 int ret;
707 bool is_separable = false;
708
709 if (shader->type == PIPE_SHADER_IR_NIR) {
710 struct nir_to_tgsi_options options = {
711 .unoptimized_ra = true,
712 .lower_fabs = true,
713 .lower_ssbo_bindings =
714 rs->caps.caps.v2.host_feature_check_version >= 16,
715 .non_compute_membar_needs_all_modes = true
716 };
717
718 if (!(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_TEXTURE_SHADOW_LOD) &&
719 rs->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES) {
720 nir_lower_tex_options lower_tex_options = {
721 .lower_offset_filter = lower_gles_arrayshadow_offset_filter,
722 };
723
724 NIR_PASS_V(shader->ir.nir, nir_lower_tex, &lower_tex_options);
725 }
726
727 nir_shader *s = nir_shader_clone(NULL, shader->ir.nir);
728
729 /* The host can't handle certain IO slots as separable, because we can't
730  * assign more than 32 IO locations explicitly, and with varyings and patches
731  * we already exhaust the possible ways of handling this for varyings with
732  * generic names, so drop the flag in these cases. */
733 const uint64_t drop_slots_for_separable_io = 0xffull << VARYING_SLOT_TEX0 |
734 1 << VARYING_SLOT_FOGC |
735 1 << VARYING_SLOT_BFC0 |
736 1 << VARYING_SLOT_BFC1 |
737 1 << VARYING_SLOT_COL0 |
738 1 << VARYING_SLOT_COL1;
739 bool keep_separable_flags = true;
740 if (s->info.stage != MESA_SHADER_VERTEX)
741 keep_separable_flags &= !(s->info.inputs_read & drop_slots_for_separable_io);
742 if (s->info.stage != MESA_SHADER_FRAGMENT)
743 keep_separable_flags &= !(s->info.outputs_written & drop_slots_for_separable_io);
744
745 /* Propagate the separable shader property to the host, unless
746 * it is an internal shader - these are marked separable even though they are not. */
747 is_separable = s->info.separate_shader && !s->info.internal && keep_separable_flags;
748 ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
749 } else {
750 tokens = shader->tokens;
751 }
752
753 new_tokens = virgl_tgsi_transform(rs, tokens, is_separable);
754 if (!new_tokens)
755 return NULL;
756
757 handle = virgl_object_assign_handle();
758 /* encode VS state */
759 ret = virgl_encode_shader_state(vctx, handle, type,
760 &shader->stream_output, 0,
761 new_tokens);
762 if (ret) {
763 FREE((void *)ntt_tokens);
764 return NULL;
765 }
766
767 FREE((void *)ntt_tokens);
768 FREE(new_tokens);
769 return (void *)(unsigned long)handle;
770
771 }
772 static void *virgl_create_vs_state(struct pipe_context *ctx,
773 const struct pipe_shader_state *shader)
774 {
775 return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
776 }
777
778 static void *virgl_create_tcs_state(struct pipe_context *ctx,
779 const struct pipe_shader_state *shader)
780 {
781 return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
782 }
783
784 static void *virgl_create_tes_state(struct pipe_context *ctx,
785 const struct pipe_shader_state *shader)
786 {
787 return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
788 }
789
790 static void *virgl_create_gs_state(struct pipe_context *ctx,
791 const struct pipe_shader_state *shader)
792 {
793 return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
794 }
795
796 static void *virgl_create_fs_state(struct pipe_context *ctx,
797 const struct pipe_shader_state *shader)
798 {
799 return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
800 }
801
802 static void
803 virgl_delete_fs_state(struct pipe_context *ctx,
804 void *fs)
805 {
806 uint32_t handle = (unsigned long)fs;
807 struct virgl_context *vctx = virgl_context(ctx);
808
809 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
810 }
811
812 static void
813 virgl_delete_gs_state(struct pipe_context *ctx,
814 void *gs)
815 {
816 uint32_t handle = (unsigned long)gs;
817 struct virgl_context *vctx = virgl_context(ctx);
818
819 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
820 }
821
822 static void
823 virgl_delete_vs_state(struct pipe_context *ctx,
824 void *vs)
825 {
826 uint32_t handle = (unsigned long)vs;
827 struct virgl_context *vctx = virgl_context(ctx);
828
829 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
830 }
831
832 static void
833 virgl_delete_tcs_state(struct pipe_context *ctx,
834 void *tcs)
835 {
836 uint32_t handle = (unsigned long)tcs;
837 struct virgl_context *vctx = virgl_context(ctx);
838
839 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
840 }
841
842 static void
843 virgl_delete_tes_state(struct pipe_context *ctx,
844 void *tes)
845 {
846 uint32_t handle = (unsigned long)tes;
847 struct virgl_context *vctx = virgl_context(ctx);
848
849 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
850 }
851
852 static void virgl_bind_vs_state(struct pipe_context *ctx,
853 void *vss)
854 {
855 uint32_t handle = (unsigned long)vss;
856 struct virgl_context *vctx = virgl_context(ctx);
857
858 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
859 }
860
861 static void virgl_bind_tcs_state(struct pipe_context *ctx,
862 void *vss)
863 {
864 uint32_t handle = (unsigned long)vss;
865 struct virgl_context *vctx = virgl_context(ctx);
866
867 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
868 }
869
870 static void virgl_bind_tes_state(struct pipe_context *ctx,
871 void *vss)
872 {
873 uint32_t handle = (unsigned long)vss;
874 struct virgl_context *vctx = virgl_context(ctx);
875
876 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
877 }
878
879 static void virgl_bind_gs_state(struct pipe_context *ctx,
880 void *vss)
881 {
882 uint32_t handle = (unsigned long)vss;
883 struct virgl_context *vctx = virgl_context(ctx);
884
885 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
886 }
887
888
889 static void virgl_bind_fs_state(struct pipe_context *ctx,
890 void *vss)
891 {
892 uint32_t handle = (unsigned long)vss;
893 struct virgl_context *vctx = virgl_context(ctx);
894
895 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
896 }
897
898 static void virgl_clear(struct pipe_context *ctx,
899 unsigned buffers,
900 const struct pipe_scissor_state *scissor_state,
901 const union pipe_color_union *color,
902 double depth, unsigned stencil)
903 {
904 struct virgl_context *vctx = virgl_context(ctx);
905
906 if (!vctx->num_draws)
907 virgl_reemit_draw_resources(vctx);
908 vctx->num_draws++;
909
910 virgl_encode_clear(vctx, buffers, color, depth, stencil);
911 }
912
913 static void virgl_clear_render_target(struct pipe_context *ctx,
914 struct pipe_surface *dst,
915 const union pipe_color_union *color,
916 unsigned dstx, unsigned dsty,
917 unsigned width, unsigned height,
918 bool render_condition_enabled)
919 {
920 struct virgl_context *vctx = virgl_context(ctx);
921
922 virgl_encode_clear_surface(vctx, dst, PIPE_CLEAR_COLOR0, color,
923 dstx, dsty, width, height, render_condition_enabled);
924
925 /* Mark as dirty, since we are updating the host side resource
926 * without going through the corresponding guest side resource, and
927 * hence the two will diverge.
928 */
929 virgl_resource_dirty(virgl_resource(dst->texture), dst->u.tex.level);
930 }
931
932 static void virgl_clear_depth_stencil(struct pipe_context *ctx,
933 struct pipe_surface *dst,
934 unsigned clear_flags,
935 double depth,
936 unsigned stencil,
937 unsigned dstx, unsigned dsty,
938 unsigned width, unsigned height,
939 bool render_condition_enabled)
940 {
941 struct virgl_context *vctx = virgl_context(ctx);
942
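   /* Pack the double depth value into ui[0..1] and the stencil reference into
    * ui[3] of the color union before encoding the clear.
    */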
943 union pipe_color_union color;
944 memcpy(color.ui, &depth, sizeof(double));
945 color.ui[3] = stencil;
946
947 virgl_encode_clear_surface(vctx, dst, clear_flags, &color,
948 dstx, dsty, width, height, render_condition_enabled);
949
950 /* Mark as dirty, since we are updating the host side resource
951 * without going through the corresponding guest side resource, and
952 * hence the two will diverge.
953 */
954 virgl_resource_dirty(virgl_resource(dst->texture), dst->u.tex.level);
955 }
956
957 static void virgl_clear_render_target_stub(struct pipe_context *ctx,
958 struct pipe_surface *dst,
959 const union pipe_color_union *color,
960 unsigned dstx, unsigned dsty,
961 unsigned width, unsigned height,
962 bool render_condition_enabled)
963 {
964 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
965 debug_printf("VIRGL: clear render target unsupported.\n");
966 return;
967 }
968
969 static void virgl_clear_texture(struct pipe_context *ctx,
970 struct pipe_resource *res,
971 unsigned int level,
972 const struct pipe_box *box,
973 const void *data)
974 {
975 struct virgl_screen *rs = virgl_screen(ctx->screen);
976 struct virgl_resource *vres = virgl_resource(res);
977
978 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_CLEAR_TEXTURE) {
979 struct virgl_context *vctx = virgl_context(ctx);
980 virgl_encode_clear_texture(vctx, vres, level, box, data);
981 } else {
982 u_default_clear_texture(ctx, res, level, box, data);
983 }
984 /* Mark as dirty, since we are updating the host side resource
985 * without going through the corresponding guest side resource, and
986 * hence the two will diverge.
987 */
988 virgl_resource_dirty(vres, level);
989 }
990
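/* Draw entry point: split multi-draws, fall back to u_primconvert for
 * primitive types the host does not support, upload user index buffers, and
 * encode the draw.
 */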
991 static void virgl_draw_vbo(struct pipe_context *ctx,
992 const struct pipe_draw_info *dinfo,
993 unsigned drawid_offset,
994 const struct pipe_draw_indirect_info *indirect,
995 const struct pipe_draw_start_count_bias *draws,
996 unsigned num_draws)
997 {
998 if (num_draws > 1) {
999 util_draw_multi(ctx, dinfo, drawid_offset, indirect, draws, num_draws);
1000 return;
1001 }
1002
1003 if (!indirect && (!draws[0].count || !dinfo->instance_count))
1004 return;
1005
1006 struct virgl_context *vctx = virgl_context(ctx);
1007 struct virgl_screen *rs = virgl_screen(ctx->screen);
1008 struct virgl_indexbuf ib = { 0 };
1009 struct pipe_draw_info info = *dinfo;
1010
1011 if (!indirect &&
1012 !dinfo->primitive_restart &&
1013 !u_trim_pipe_prim(dinfo->mode, (unsigned*)&draws[0].count))
1014 return;
1015
1016 if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
1017 util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
1018 util_primconvert_draw_vbo(vctx->primconvert, dinfo, drawid_offset, indirect, draws, num_draws);
1019 return;
1020 }
1021 if (info.index_size) {
1022 pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
1023 ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
1024 ib.index_size = dinfo->index_size;
1025 ib.offset = draws[0].start * ib.index_size;
1026
1027 if (ib.user_buffer) {
1028 unsigned start_offset = draws[0].start * ib.index_size;
1029 u_upload_data(vctx->uploader, 0,
1030 draws[0].count * ib.index_size, 4,
1031 (char*)ib.user_buffer + start_offset,
1032 &ib.offset, &ib.buffer);
1033 ib.user_buffer = NULL;
1034 }
1035 virgl_hw_set_index_buffer(vctx, &ib);
1036 }
1037
1038 if (!vctx->num_draws)
1039 virgl_reemit_draw_resources(vctx);
1040 vctx->num_draws++;
1041
1042 virgl_hw_set_vertex_buffers(vctx);
1043
1044 virgl_encoder_draw_vbo(vctx, &info, drawid_offset, indirect, &draws[0]);
1045
1046 pipe_resource_reference(&ib.buffer, NULL);
1047
1048 }
1049
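/* Submit the command buffer to the winsys. With VIRGL_DEBUG_SYNC set, every
 * submission is made synchronous by waiting on its fence.
 */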
1050 static void virgl_submit_cmd(struct virgl_winsys *vws,
1051 struct virgl_cmd_buf *cbuf,
1052 struct pipe_fence_handle **fence)
1053 {
1054 if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
1055 struct pipe_fence_handle *sync_fence = NULL;
1056
1057 vws->submit_cmd(vws, cbuf, &sync_fence);
1058
1059 vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
1060 vws->fence_reference(vws, &sync_fence, NULL);
1061 } else {
1062 vws->submit_cmd(vws, cbuf, fence);
1063 }
1064 }
1065
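/* Flush the accumulated command stream: fold any queued transfers into the
 * command buffer, submit it, then re-initialize the buffer for the next batch.
 */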
1066 void virgl_flush_eq(struct virgl_context *ctx, void *closure,
1067 struct pipe_fence_handle **fence)
1068 {
1069 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
1070
1071 /* skip empty cbuf */
1072 if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
1073 ctx->queue.num_dwords == 0 &&
1074 !fence)
1075 return;
1076
1077 if (ctx->num_draws)
1078 u_upload_unmap(ctx->uploader);
1079
1080 /* send the buffer to the remote side for decoding */
1081 ctx->num_draws = ctx->num_compute = 0;
1082
1083 virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
1084
1085 virgl_submit_cmd(rs->vws, ctx->cbuf, fence);
1086
1087 /* Reserve some space for transfers. */
1088 if (ctx->encoded_transfers)
1089 ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;
1090
1091 virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
1092
1093 ctx->cbuf_initial_cdw = ctx->cbuf->cdw;
1094
1095 /* We have flushed the command queue, including any pending copy transfers
1096 * involving staging resources.
1097 */
1098 ctx->queued_staging_res_size = 0;
1099 }
1100
1101 static void virgl_flush_from_st(struct pipe_context *ctx,
1102 struct pipe_fence_handle **fence,
1103 enum pipe_flush_flags flags)
1104 {
1105 struct virgl_context *vctx = virgl_context(ctx);
1106
1107 virgl_flush_eq(vctx, vctx, fence);
1108 }
1109
1110 static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
1111 struct pipe_resource *texture,
1112 const struct pipe_sampler_view *state)
1113 {
1114 struct virgl_context *vctx = virgl_context(ctx);
1115 struct virgl_sampler_view *grview;
1116 uint32_t handle;
1117 struct virgl_resource *res;
1118
1119 if (!state)
1120 return NULL;
1121
1122 grview = CALLOC_STRUCT(virgl_sampler_view);
1123 if (!grview)
1124 return NULL;
1125
1126 res = virgl_resource(texture);
1127 handle = virgl_object_assign_handle();
1128 virgl_encode_sampler_view(vctx, handle, res, state);
1129
1130 grview->base = *state;
1131 grview->base.reference.count = 1;
1132
1133 grview->base.texture = NULL;
1134 grview->base.context = ctx;
1135 pipe_resource_reference(&grview->base.texture, texture);
1136 grview->handle = handle;
1137 return &grview->base;
1138 }
1139
1140 static void virgl_set_sampler_views(struct pipe_context *ctx,
1141 enum pipe_shader_type shader_type,
1142 unsigned start_slot,
1143 unsigned num_views,
1144 unsigned unbind_num_trailing_slots,
1145 bool take_ownership,
1146 struct pipe_sampler_view **views)
1147 {
1148 struct virgl_context *vctx = virgl_context(ctx);
1149 struct virgl_shader_binding_state *binding =
1150 &vctx->shader_bindings[shader_type];
1151
1152 for (unsigned i = 0; i < num_views; i++) {
1153 unsigned idx = start_slot + i;
1154 if (views && views[i]) {
1155 struct virgl_resource *res = virgl_resource(views[i]->texture);
1156 res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
1157
1158 if (take_ownership) {
1159 pipe_sampler_view_reference(&binding->views[idx], NULL);
1160 binding->views[idx] = views[i];
1161 } else {
1162 pipe_sampler_view_reference(&binding->views[idx], views[i]);
1163 }
1164 } else {
1165 pipe_sampler_view_reference(&binding->views[idx], NULL);
1166 }
1167 }
1168
1169 virgl_encode_set_sampler_views(vctx, shader_type,
1170 start_slot, num_views, (struct virgl_sampler_view **)binding->views);
1171 virgl_attach_res_sampler_views(vctx, shader_type);
1172
1173 if (unbind_num_trailing_slots) {
1174 virgl_set_sampler_views(ctx, shader_type, start_slot + num_views,
1175 unbind_num_trailing_slots, 0, false, NULL);
1176 }
1177 }
1178
1179 static void
1180 virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
1181 {
1182 struct virgl_context *vctx = virgl_context(ctx);
1183 struct virgl_screen *rs = virgl_screen(ctx->screen);
1184
1185 if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
1186 !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
1187 return;
1188 virgl_encode_texture_barrier(vctx, flags);
1189 }
1190
1191 static void virgl_destroy_sampler_view(struct pipe_context *ctx,
1192 struct pipe_sampler_view *view)
1193 {
1194 struct virgl_context *vctx = virgl_context(ctx);
1195 struct virgl_sampler_view *grview = virgl_sampler_view(view);
1196
1197 virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
1198 pipe_resource_reference(&view->texture, NULL);
1199 FREE(view);
1200 }
1201
1202 static void *virgl_create_sampler_state(struct pipe_context *ctx,
1203 const struct pipe_sampler_state *state)
1204 {
1205 struct virgl_context *vctx = virgl_context(ctx);
1206 uint32_t handle;
1207
1208 handle = virgl_object_assign_handle();
1209
1210 virgl_encode_sampler_state(vctx, handle, state);
1211 return (void *)(unsigned long)handle;
1212 }
1213
1214 static void virgl_delete_sampler_state(struct pipe_context *ctx,
1215 void *ss)
1216 {
1217 struct virgl_context *vctx = virgl_context(ctx);
1218 uint32_t handle = (unsigned long)ss;
1219
1220 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
1221 }
1222
1223 static void virgl_bind_sampler_states(struct pipe_context *ctx,
1224 enum pipe_shader_type shader,
1225 unsigned start_slot,
1226 unsigned num_samplers,
1227 void **samplers)
1228 {
1229 struct virgl_context *vctx = virgl_context(ctx);
1230 uint32_t handles[PIPE_MAX_SAMPLERS];
1231 int i;
1232 for (i = 0; i < num_samplers; i++) {
1233 handles[i] = (unsigned long)(samplers[i]);
1234 }
1235 virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
1236 }
1237
1238 static void virgl_set_polygon_stipple(struct pipe_context *ctx,
1239 const struct pipe_poly_stipple *ps)
1240 {
1241 struct virgl_context *vctx = virgl_context(ctx);
1242 virgl_encoder_set_polygon_stipple(vctx, ps);
1243 }
1244
1245 static void virgl_set_scissor_states(struct pipe_context *ctx,
1246 unsigned start_slot,
1247 unsigned num_scissor,
1248 const struct pipe_scissor_state *ss)
1249 {
1250 struct virgl_context *vctx = virgl_context(ctx);
1251 virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
1252 }
1253
1254 static void virgl_set_sample_mask(struct pipe_context *ctx,
1255 unsigned sample_mask)
1256 {
1257 struct virgl_context *vctx = virgl_context(ctx);
1258 virgl_encoder_set_sample_mask(vctx, sample_mask);
1259 }
1260
1261 static void virgl_set_min_samples(struct pipe_context *ctx,
1262 unsigned min_samples)
1263 {
1264 struct virgl_context *vctx = virgl_context(ctx);
1265 struct virgl_screen *rs = virgl_screen(ctx->screen);
1266
1267 if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
1268 return;
1269 virgl_encoder_set_min_samples(vctx, min_samples);
1270 }
1271
1272 static void virgl_set_clip_state(struct pipe_context *ctx,
1273 const struct pipe_clip_state *clip)
1274 {
1275 struct virgl_context *vctx = virgl_context(ctx);
1276 virgl_encoder_set_clip_state(vctx, clip);
1277 }
1278
1279 static void virgl_set_tess_state(struct pipe_context *ctx,
1280 const float default_outer_level[4],
1281 const float default_inner_level[2])
1282 {
1283 struct virgl_context *vctx = virgl_context(ctx);
1284 struct virgl_screen *rs = virgl_screen(ctx->screen);
1285
1286 if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
1287 return;
1288 virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
1289 }
1290
1291 static void virgl_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
1292 {
1293 struct virgl_context *vctx = virgl_context(ctx);
1294
1295 vctx->patch_vertices = patch_vertices;
1296 }
1297
1298 static void virgl_resource_copy_region(struct pipe_context *ctx,
1299 struct pipe_resource *dst,
1300 unsigned dst_level,
1301 unsigned dstx, unsigned dsty, unsigned dstz,
1302 struct pipe_resource *src,
1303 unsigned src_level,
1304 const struct pipe_box *src_box)
1305 {
1306 struct virgl_context *vctx = virgl_context(ctx);
1307 struct virgl_resource *dres = virgl_resource(dst);
1308 struct virgl_resource *sres = virgl_resource(src);
1309
1310 if (dres->b.target == PIPE_BUFFER)
1311 util_range_add(&dres->b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
1312 virgl_resource_dirty(dres, dst_level);
1313
1314 virgl_encode_resource_copy_region(vctx, dres,
1315 dst_level, dstx, dsty, dstz,
1316 sres, src_level,
1317 src_box);
1318 }
1319
1320 static void
1321 virgl_flush_resource(struct pipe_context *pipe,
1322 struct pipe_resource *resource)
1323 {
1324 }
1325
1326 static void virgl_blit(struct pipe_context *ctx,
1327 const struct pipe_blit_info *blit)
1328 {
1329 struct virgl_context *vctx = virgl_context(ctx);
1330 struct virgl_resource *dres = virgl_resource(blit->dst.resource);
1331 struct virgl_resource *sres = virgl_resource(blit->src.resource);
1332
1333 assert(ctx->screen->caps.dest_surface_srgb_control ||
1334 (util_format_is_srgb(blit->dst.resource->format) ==
1335 util_format_is_srgb(blit->dst.format)));
1336
1337 virgl_resource_dirty(dres, blit->dst.level);
1338 virgl_encode_blit(vctx, dres, sres,
1339 blit);
1340 }
1341
1342 static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
1343 unsigned start_slot,
1344 unsigned count,
1345 const struct pipe_shader_buffer *buffers)
1346 {
1347 struct virgl_context *vctx = virgl_context(ctx);
1348
1349 vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1350 for (unsigned i = 0; i < count; i++) {
1351 unsigned idx = start_slot + i;
1352 if (buffers && buffers[i].buffer) {
1353 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1354 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1355
1356 pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
1357 buffers[i].buffer);
1358 vctx->atomic_buffers[idx] = buffers[i];
1359 vctx->atomic_buffer_enabled_mask |= 1 << idx;
1360 } else {
1361 pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
1362 }
1363 }
1364
1365 virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
1366 }
1367
1368 static void virgl_set_shader_buffers(struct pipe_context *ctx,
1369 enum pipe_shader_type shader,
1370 unsigned start_slot, unsigned count,
1371 const struct pipe_shader_buffer *buffers,
1372 unsigned writable_bitmask)
1373 {
1374 struct virgl_context *vctx = virgl_context(ctx);
1375 struct virgl_screen *rs = virgl_screen(ctx->screen);
1376 struct virgl_shader_binding_state *binding =
1377 &vctx->shader_bindings[shader];
1378
1379 binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1380 for (unsigned i = 0; i < count; i++) {
1381 unsigned idx = start_slot + i;
1382 if (buffers && buffers[i].buffer) {
1383 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1384 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
1385
1386 pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
1387 binding->ssbos[idx] = buffers[i];
1388 binding->ssbo_enabled_mask |= 1 << idx;
1389 } else {
1390 pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
1391 }
1392 }
1393
1394 uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
1395 rs->caps.caps.v2.max_shader_buffer_frag_compute :
1396 rs->caps.caps.v2.max_shader_buffer_other_stages;
1397 if (!max_shader_buffer)
1398 return;
1399 virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
1400 }
1401
1402 static void virgl_create_fence_fd(struct pipe_context *ctx,
1403 struct pipe_fence_handle **fence,
1404 int fd,
1405 enum pipe_fd_type type)
1406 {
1407 assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
1408 struct virgl_screen *rs = virgl_screen(ctx->screen);
1409
1410 if (rs->vws->cs_create_fence)
1411 *fence = rs->vws->cs_create_fence(rs->vws, fd);
1412 }
1413
1414 static void virgl_fence_server_sync(struct pipe_context *ctx,
1415 struct pipe_fence_handle *fence)
1416 {
1417 struct virgl_context *vctx = virgl_context(ctx);
1418 struct virgl_screen *rs = virgl_screen(ctx->screen);
1419
1420 if (rs->vws->fence_server_sync)
1421 rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
1422 }
1423
1424 static void virgl_set_shader_images(struct pipe_context *ctx,
1425 enum pipe_shader_type shader,
1426 unsigned start_slot, unsigned count,
1427 unsigned unbind_num_trailing_slots,
1428 const struct pipe_image_view *images)
1429 {
1430 struct virgl_context *vctx = virgl_context(ctx);
1431 struct virgl_screen *rs = virgl_screen(ctx->screen);
1432 struct virgl_shader_binding_state *binding =
1433 &vctx->shader_bindings[shader];
1434
1435 binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
1436 for (unsigned i = 0; i < count; i++) {
1437 unsigned idx = start_slot + i;
1438 if (images && images[i].resource) {
1439 struct virgl_resource *res = virgl_resource(images[i].resource);
1440 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
1441
1442 pipe_resource_reference(&binding->images[idx].resource,
1443 images[i].resource);
1444 binding->images[idx] = images[i];
1445 binding->image_enabled_mask |= 1 << idx;
1446 } else {
1447 pipe_resource_reference(&binding->images[idx].resource, NULL);
1448 }
1449 }
1450
1451 uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
1452 rs->caps.caps.v2.max_shader_image_frag_compute :
1453 rs->caps.caps.v2.max_shader_image_other_stages;
1454 if (!max_shader_images)
1455 return;
1456 virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
1457
1458 if (unbind_num_trailing_slots) {
1459 virgl_set_shader_images(ctx, shader, start_slot + count,
1460 unbind_num_trailing_slots, 0, NULL);
1461 }
1462 }
1463
1464 static void virgl_memory_barrier(struct pipe_context *ctx,
1465 unsigned flags)
1466 {
1467 struct virgl_context *vctx = virgl_context(ctx);
1468 struct virgl_screen *rs = virgl_screen(ctx->screen);
1469
1470 if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
1471 return;
1472 virgl_encode_memory_barrier(vctx, flags);
1473 }
1474
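/* Create a compute shader CSO.  NIR shaders are lowered to TGSI first, the
 * TGSI is run through the virgl-specific transform and the result is encoded
 * for the host.  The returned "CSO" is simply the host object handle cast to
 * a pointer.
 */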
1475 static void *virgl_create_compute_state(struct pipe_context *ctx,
1476 const struct pipe_compute_state *state)
1477 {
1478 struct virgl_context *vctx = virgl_context(ctx);
1479 uint32_t handle;
1480 const struct tgsi_token *ntt_tokens = NULL;
1481 const struct tgsi_token *tokens;
1482 struct pipe_stream_output_info so_info = { 0 };
1483 int ret;
1484
1485 if (state->ir_type == PIPE_SHADER_IR_NIR) {
1486 struct nir_to_tgsi_options options = {
1487 .unoptimized_ra = true,
1488 .lower_fabs = true
1489 };
1490 nir_shader *s = nir_shader_clone(NULL, state->prog);
1491 ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
1492 } else {
1493 tokens = state->prog;
1494 }
1495
1496 void *new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, tokens, false);
1497    if (!new_tokens) {
        FREE((void *)ntt_tokens); /* avoid leaking the NIR-to-TGSI tokens on failure */
1498       return NULL;
        }
1499
1500 handle = virgl_object_assign_handle();
1501 ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
1502 &so_info,
1503 state->static_shared_mem,
1504 new_tokens);
1505    if (ret) {
1506       FREE((void *)ntt_tokens);
        FREE(new_tokens); /* the transformed tokens are also unneeded on failure */
1507       return NULL;
1508    }
1509
1510 FREE((void *)ntt_tokens);
1511 FREE(new_tokens);
1512
1513 return (void *)(unsigned long)handle;
1514 }
1515
1516 static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
1517 {
1518 uint32_t handle = (unsigned long)state;
1519 struct virgl_context *vctx = virgl_context(ctx);
1520
1521 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
1522 }
1523
1524 static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
1525 {
1526 uint32_t handle = (unsigned long)state;
1527 struct virgl_context *vctx = virgl_context(ctx);
1528
1529 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
1530 }
1531
1532 static void virgl_launch_grid(struct pipe_context *ctx,
1533 const struct pipe_grid_info *info)
1534 {
1535 struct virgl_context *vctx = virgl_context(ctx);
1536
1537 if (!vctx->num_compute)
1538 virgl_reemit_compute_resources(vctx);
1539 vctx->num_compute++;
1540
1541 virgl_encode_launch_grid(vctx, info);
1542 }
1543
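/* Drop all guest-side references held by one stage's shader binding state:
 * sampler views, UBOs, SSBOs and shader images.
 */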
1544 static void
1545 virgl_release_shader_binding(struct virgl_context *vctx,
1546 enum pipe_shader_type shader_type)
1547 {
1548 struct virgl_shader_binding_state *binding =
1549 &vctx->shader_bindings[shader_type];
1550
1551 for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
1552 if (binding->views[i]) {
1553 pipe_sampler_view_reference(
1554 (struct pipe_sampler_view **)&binding->views[i], NULL);
1555 }
1556 }
1557
1558 while (binding->ubo_enabled_mask) {
1559 int i = u_bit_scan(&binding->ubo_enabled_mask);
1560 pipe_resource_reference(&binding->ubos[i].buffer, NULL);
1561 }
1562
1563 while (binding->ssbo_enabled_mask) {
1564 int i = u_bit_scan(&binding->ssbo_enabled_mask);
1565 pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
1566 }
1567
1568 while (binding->image_enabled_mask) {
1569 int i = u_bit_scan(&binding->image_enabled_mask);
1570 pipe_resource_reference(&binding->images[i].resource, NULL);
1571 }
1572 }
1573
1574 static void
1575 virgl_emit_string_marker(struct pipe_context *ctx, const char *message, int len)
1576 {
1577 struct virgl_context *vctx = virgl_context(ctx);
1578 virgl_encode_emit_string_marker(vctx, message, len);
1579 }
1580
1581 static void
1582 virgl_context_destroy( struct pipe_context *ctx )
1583 {
1584 struct virgl_context *vctx = virgl_context(ctx);
1585 struct virgl_screen *rs = virgl_screen(ctx->screen);
1586 enum pipe_shader_type shader_type;
1587
1588 vctx->framebuffer.zsbuf = NULL;
1589 vctx->framebuffer.nr_cbufs = 0;
1590 virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1591 virgl_flush_eq(vctx, vctx, NULL);
1592
1593 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
1594 virgl_release_shader_binding(vctx, shader_type);
1595
1596 while (vctx->atomic_buffer_enabled_mask) {
1597 int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
1598 pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
1599 }
1600
1601 rs->vws->cmd_buf_destroy(vctx->cbuf);
1602 if (vctx->uploader)
1603 u_upload_destroy(vctx->uploader);
1604 if (vctx->supports_staging)
1605 virgl_staging_destroy(&vctx->staging);
1606 util_primconvert_destroy(vctx->primconvert);
1607 virgl_transfer_queue_fini(&vctx->queue);
1608
1609 slab_destroy_child(&vctx->transfer_pool);
1610 FREE(vctx);
1611 }
1612
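/* Report the sub-pixel position of a sample.  The host packs the sample
 * locations as one byte per sample, with the x offset in the high nibble and
 * the y offset in the low nibble, both in 1/16th-pixel units.
 */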
1613 static void virgl_get_sample_position(struct pipe_context *ctx,
1614 unsigned sample_count,
1615 unsigned index,
1616 float *out_value)
1617 {
1618 struct virgl_context *vctx = virgl_context(ctx);
1619 struct virgl_screen *vs = virgl_screen(vctx->base.screen);
1620
1621 if (sample_count > vs->caps.caps.v1.max_samples) {
1622 debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
1623 sample_count, vs->caps.caps.v1.max_samples);
1624 return;
1625 }
1626
1627    /* The following is basically copied from the i965 driver's
1628     * gen6_get_sample_position(). The only addition is that we hold the MSAA
1629     * positions for all sample counts in a flat array. */
1630 uint32_t bits = 0;
1631 if (sample_count == 1) {
1632 out_value[0] = out_value[1] = 0.5f;
1633 return;
1634 } else if (sample_count == 2) {
1635 bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
1636 } else if (sample_count <= 4) {
1637 bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
1638 } else if (sample_count <= 8) {
1639 bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
1640 } else if (sample_count <= 16) {
1641 bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
1642 }
1643 out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
1644 out_value[1] = (bits & 0xf) / 16.0f;
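   /* Example: a packed byte of 0xc4 decodes to x = 12/16 = 0.75 and
    * y = 4/16 = 0.25. */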
1645
1646 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
1647       debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
1648 index, sample_count, out_value[0], out_value[1]);
1649 }
1650
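/* Forward screen-level tweaks (BGRA emulation, BGRA destination swizzle,
 * GLES transform-feedback sample multiplier) to the host renderer.
 */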
1651 static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
1652 {
1653 if (rs->tweak_gles_emulate_bgra)
1654 virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);
1655
1656 if (rs->tweak_gles_apply_bgra_dest_swizzle)
1657 virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);
1658
1659 if (rs->tweak_gles_tf3_value > 0)
1660 virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
1661 rs->tweak_gles_tf3_value);
1662 }
1663
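/* Link the currently bound shader stages on the host.  When shader_sync is
 * enabled (and synchronous flushing is not already forced by VIRGL_DEBUG_SYNC),
 * flush and wait on a fence so host-side linking has finished before we
 * return.
 */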
1664 static void virgl_link_shader(struct pipe_context *ctx, void **handles)
1665 {
1666 struct virgl_context *vctx = virgl_context(ctx);
1667 struct virgl_screen *rs = virgl_screen(vctx->base.screen);
1668
1669 uint32_t shader_handles[PIPE_SHADER_TYPES];
1670 for (uint32_t i = 0; i < PIPE_SHADER_TYPES; ++i)
1671 shader_handles[i] = (uintptr_t)handles[i];
1672 virgl_encode_link_shader(vctx, shader_handles);
1673
1674 /* block until shader linking is finished on host */
1675 if (rs->shader_sync && !unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
1676 struct virgl_winsys *vws = rs->vws;
1677 struct pipe_fence_handle *sync_fence;
1678 virgl_flush_eq(vctx, vctx, &sync_fence);
1679 vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
1680 vws->fence_reference(vws, &sync_fence, NULL);
1681 }
1682 }
1683
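/* Create a virgl pipe_context: allocate the command buffer, wire up the
 * pipe_context entry points, set up the transfer queue, uploaders and
 * optional staging manager, and create/select a host sub-context.
 */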
1684 struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
1685 void *priv,
1686 unsigned flags)
1687 {
1688 struct virgl_context *vctx;
1689 struct virgl_screen *rs = virgl_screen(pscreen);
1690    vctx = CALLOC_STRUCT(virgl_context);
        if (!vctx)
           return NULL;
1691    const char *host_debug_flagstring;
1692
1693 vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
1694 if (!vctx->cbuf) {
1695 FREE(vctx);
1696 return NULL;
1697 }
1698
1699 vctx->base.destroy = virgl_context_destroy;
1700 vctx->base.create_surface = virgl_create_surface;
1701 vctx->base.surface_destroy = virgl_surface_destroy;
1702 vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
1703 vctx->base.create_blend_state = virgl_create_blend_state;
1704 vctx->base.bind_blend_state = virgl_bind_blend_state;
1705 vctx->base.delete_blend_state = virgl_delete_blend_state;
1706 vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
1707 vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
1708 vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
1709 vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
1710 vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
1711 vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;
1712
1713 vctx->base.set_viewport_states = virgl_set_viewport_states;
1714 vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
1715 vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
1716 vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
1717 vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
1718 vctx->base.set_constant_buffer = virgl_set_constant_buffer;
1719
1720 vctx->base.set_tess_state = virgl_set_tess_state;
1721 vctx->base.set_patch_vertices = virgl_set_patch_vertices;
1722 vctx->base.create_vs_state = virgl_create_vs_state;
1723 vctx->base.create_tcs_state = virgl_create_tcs_state;
1724 vctx->base.create_tes_state = virgl_create_tes_state;
1725 vctx->base.create_gs_state = virgl_create_gs_state;
1726 vctx->base.create_fs_state = virgl_create_fs_state;
1727
1728 vctx->base.bind_vs_state = virgl_bind_vs_state;
1729 vctx->base.bind_tcs_state = virgl_bind_tcs_state;
1730 vctx->base.bind_tes_state = virgl_bind_tes_state;
1731 vctx->base.bind_gs_state = virgl_bind_gs_state;
1732 vctx->base.bind_fs_state = virgl_bind_fs_state;
1733
1734 vctx->base.delete_vs_state = virgl_delete_vs_state;
1735 vctx->base.delete_tcs_state = virgl_delete_tcs_state;
1736 vctx->base.delete_tes_state = virgl_delete_tes_state;
1737 vctx->base.delete_gs_state = virgl_delete_gs_state;
1738 vctx->base.delete_fs_state = virgl_delete_fs_state;
1739
1740 vctx->base.create_compute_state = virgl_create_compute_state;
1741 vctx->base.bind_compute_state = virgl_bind_compute_state;
1742 vctx->base.delete_compute_state = virgl_delete_compute_state;
1743 vctx->base.launch_grid = virgl_launch_grid;
1744
1745 vctx->base.clear = virgl_clear;
1746 if (rs->caps.caps.v2.host_feature_check_version >= 21) {
1747 vctx->base.clear_render_target = virgl_clear_render_target;
1748 vctx->base.clear_depth_stencil = virgl_clear_depth_stencil;
1749 } else {
1750       // A clear_render_target stub is still required by the VL (video) backend
1751 vctx->base.clear_render_target = virgl_clear_render_target_stub;
1752 }
1753 vctx->base.clear_texture = virgl_clear_texture;
1754 vctx->base.draw_vbo = virgl_draw_vbo;
1755 vctx->base.flush = virgl_flush_from_st;
1756 vctx->base.screen = pscreen;
1757 vctx->base.create_sampler_view = virgl_create_sampler_view;
1758 vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
1759 vctx->base.set_sampler_views = virgl_set_sampler_views;
1760 vctx->base.texture_barrier = virgl_texture_barrier;
1761
1762 vctx->base.create_sampler_state = virgl_create_sampler_state;
1763 vctx->base.delete_sampler_state = virgl_delete_sampler_state;
1764 vctx->base.bind_sampler_states = virgl_bind_sampler_states;
1765
1766 vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
1767 vctx->base.set_scissor_states = virgl_set_scissor_states;
1768 vctx->base.set_sample_mask = virgl_set_sample_mask;
1769 vctx->base.set_min_samples = virgl_set_min_samples;
1770 vctx->base.set_stencil_ref = virgl_set_stencil_ref;
1771 vctx->base.set_clip_state = virgl_set_clip_state;
1772
1773 vctx->base.set_blend_color = virgl_set_blend_color;
1774
1775 vctx->base.get_sample_position = virgl_get_sample_position;
1776
1777 vctx->base.resource_copy_region = virgl_resource_copy_region;
1778 vctx->base.flush_resource = virgl_flush_resource;
1779 vctx->base.blit = virgl_blit;
1780 vctx->base.create_fence_fd = virgl_create_fence_fd;
1781 vctx->base.fence_server_sync = virgl_fence_server_sync;
1782
1783 vctx->base.set_shader_buffers = virgl_set_shader_buffers;
1784 vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
1785 vctx->base.set_shader_images = virgl_set_shader_images;
1786 vctx->base.memory_barrier = virgl_memory_barrier;
1787 vctx->base.emit_string_marker = virgl_emit_string_marker;
1788
1789 vctx->base.create_video_codec = virgl_video_create_codec;
1790 vctx->base.create_video_buffer = virgl_video_create_buffer;
1791
1792 if (rs->caps.caps.v2.host_feature_check_version >= 7)
1793 vctx->base.link_shader = virgl_link_shader;
1794
1795 virgl_init_context_resource_functions(&vctx->base);
1796 virgl_init_query_functions(vctx);
1797 virgl_init_so_functions(vctx);
1798
1799 slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
1800 virgl_transfer_queue_init(&vctx->queue, vctx);
1801 vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
1802 (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));
1803
1804 /* Reserve some space for transfers. */
1805 if (vctx->encoded_transfers)
1806 vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;
1807
1808 vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
1809 vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
1810 PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
1811 if (!vctx->uploader)
1812 goto fail;
1813 vctx->base.stream_uploader = vctx->uploader;
1814 vctx->base.const_uploader = vctx->uploader;
1815
1816 /* We use a special staging buffer as the source of copy transfers. */
1817 if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
1818 vctx->encoded_transfers) {
1819 virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
1820 vctx->supports_staging = true;
1821 }
1822
1823 vctx->hw_sub_ctx_id = p_atomic_inc_return(&rs->sub_ctx_id);
1824 virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1825
1826 virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1827
1828 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
1829 host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
1830 if (host_debug_flagstring)
1831 virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
1832 }
1833
1834 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
1835 virgl_send_tweaks(vctx, rs);
1836
1837 /* On Android, a virgl_screen is generally created first by the HWUI
1838 * service, followed by the application's no-op attempt to do the same with
1839 * eglInitialize(). To retain the ability for apps to set their own driver
1840 * config procedurally right before context creation, we must check the
1841 * envvar again.
1842 */
1843 #if DETECT_OS_ANDROID
1844 if (!rs->shader_sync) {
1845 uint64_t debug_options = debug_get_flags_option("VIRGL_DEBUG",
1846 virgl_debug_options, 0);
1847 rs->shader_sync |= !!(debug_options & VIRGL_DEBUG_SHADER_SYNC);
1848 }
1849 #endif
1850
1851 return &vctx->base;
1852 fail:
1853 virgl_context_destroy(&vctx->base);
1854 return NULL;
1855 }
1856