/* virgl command-stream encoder */
1 /*
2  * Copyright 2014, 2015 Red Hat.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26 
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33 
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virtio-gpu/virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39 
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
41 
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
43 
/* Map each PIPE_FORMAT_* enum value to its VIRGL_FORMAT_* wire value.
 * Any pipe format without an entry here defaults to 0 (i.e. no virgl
 * equivalent); pipe_to_virgl_format() below warns when such a gap is hit.
 */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(NONE)
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(I8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(I16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(I32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(I8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(I8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(I16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(I16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(I32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(I32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(R8G8_SRGB)
   CONV_FORMAT(ETC1_RGB8)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
   CONV_FORMAT(ASTC_4x4)
   CONV_FORMAT(ASTC_5x4)
   CONV_FORMAT(ASTC_5x5)
   CONV_FORMAT(ASTC_6x5)
   CONV_FORMAT(ASTC_6x6)
   CONV_FORMAT(ASTC_8x5)
   CONV_FORMAT(ASTC_8x6)
   CONV_FORMAT(ASTC_8x8)
   CONV_FORMAT(ASTC_10x5)
   CONV_FORMAT(ASTC_10x6)
   CONV_FORMAT(ASTC_10x8)
   CONV_FORMAT(ASTC_10x10)
   CONV_FORMAT(ASTC_12x10)
   CONV_FORMAT(ASTC_12x12)
   CONV_FORMAT(ASTC_4x4_SRGB)
   CONV_FORMAT(ASTC_5x4_SRGB)
   CONV_FORMAT(ASTC_5x5_SRGB)
   CONV_FORMAT(ASTC_6x5_SRGB)
   CONV_FORMAT(ASTC_6x6_SRGB)
   CONV_FORMAT(ASTC_8x5_SRGB)
   CONV_FORMAT(ASTC_8x6_SRGB)
   CONV_FORMAT(ASTC_8x8_SRGB )
   CONV_FORMAT(ASTC_10x5_SRGB)
   CONV_FORMAT(ASTC_10x6_SRGB)
   CONV_FORMAT(ASTC_10x8_SRGB)
   CONV_FORMAT(ASTC_10x10_SRGB)
   CONV_FORMAT(ASTC_12x10_SRGB)
   CONV_FORMAT(ASTC_12x12_SRGB)
};
302 
pipe_to_virgl_format(enum pipe_format format)303 enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
304 {
305    enum virgl_formats vformat = virgl_formats_conv_table[format];
306    if (format != PIPE_FORMAT_NONE && !vformat)
307       debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
308    return vformat;
309 }
310 
virgl_encoder_write_cmd_dword(struct virgl_context * ctx,uint32_t dword)311 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
312                                         uint32_t dword)
313 {
314    int len = (dword >> 16);
315 
316    if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
317       ctx->base.flush(&ctx->base, NULL, 0);
318 
319    virgl_encoder_write_dword(ctx->cbuf, dword);
320    return 0;
321 }
322 
virgl_encoder_emit_resource(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_resource * res)323 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
324                                         struct virgl_cmd_buf *buf,
325                                         struct virgl_resource *res)
326 {
327    struct virgl_winsys *vws = vs->vws;
328    if (res && res->hw_res)
329       vws->emit_res(vws, buf, res->hw_res, TRUE);
330    else {
331       virgl_encoder_write_dword(buf, 0);
332    }
333 }
334 
virgl_encoder_write_res(struct virgl_context * ctx,struct virgl_resource * res)335 static void virgl_encoder_write_res(struct virgl_context *ctx,
336                                     struct virgl_resource *res)
337 {
338    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
339    virgl_encoder_emit_resource(vs, ctx->cbuf, res);
340 }
341 
virgl_encode_bind_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)342 int virgl_encode_bind_object(struct virgl_context *ctx,
343                             uint32_t handle, uint32_t object)
344 {
345    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
346    virgl_encoder_write_dword(ctx->cbuf, handle);
347    return 0;
348 }
349 
virgl_encode_delete_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)350 int virgl_encode_delete_object(struct virgl_context *ctx,
351                               uint32_t handle, uint32_t object)
352 {
353    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
354    virgl_encoder_write_dword(ctx->cbuf, handle);
355    return 0;
356 }
357 
/* Encode a CREATE_OBJECT command for a blend state object.
 *
 * Serializes pipe_blend_state into the VIRGL_OBJ_BLEND wire layout:
 * one dword of global enables (S0), one dword for the logicop function
 * (S1), then one dword per color buffer with the per-RT configuration
 * (S2).  Always returns 0; a full command buffer is handled by the
 * flush inside virgl_encoder_write_cmd_dword.
 */
int virgl_encode_blend_state(struct virgl_context *ctx,
                            uint32_t handle,
                            const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: global enables. */
   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: logic op. */
   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S2: one dword per color buffer. */
   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      /* We use alpha src factor to pass the advanced blend equation value
       * to the host. By doing so, we don't have to change the protocol.
       */
      uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
                        ? blend_state->advanced_blend_func
                        : blend_state->rt[i].alpha_src_factor;
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}
400 
/* Encode a CREATE_OBJECT command for a depth/stencil/alpha state object.
 *
 * Wire layout (VIRGL_OBJ_DSA): S0 carries depth and alpha-test state,
 * S1/S2 carry front (index 0) and back (index 1) stencil state, followed
 * by the alpha reference value as raw float bits.  Always returns 0.
 */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                          uint32_t handle,
                          const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test/write and alpha test. */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth_enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth_writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth_func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha_enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1/S2: front- then back-face stencil state. */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   /* Alpha reference value, transported as the float's bit pattern. */
   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha_ref_value));
   return 0;
}
/* Encode a CREATE_OBJECT command for a rasterizer state object.
 *
 * Wire layout (VIRGL_OBJ_RS): S0 packs all boolean/small-field rasterizer
 * flags into one dword; S1 point size, S2 sprite coord enable mask,
 * S3 line stipple + clip plane enables, S4-S7 line width and polygon
 * offset parameters.  Floats travel as raw bit patterns via fui().
 * Always returns 0.
 */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: every bitfield flag of pipe_rasterizer_state packed into one
    * dword; the field order here must match virgl_protocol.h exactly.
    */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
483 
virgl_emit_shader_header(struct virgl_context * ctx,uint32_t handle,uint32_t len,uint32_t type,uint32_t offlen,uint32_t num_tokens)484 static void virgl_emit_shader_header(struct virgl_context *ctx,
485                                      uint32_t handle, uint32_t len,
486                                      uint32_t type, uint32_t offlen,
487                                      uint32_t num_tokens)
488 {
489    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
490    virgl_encoder_write_dword(ctx->cbuf, handle);
491    virgl_encoder_write_dword(ctx->cbuf, type);
492    virgl_encoder_write_dword(ctx->cbuf, offlen);
493    virgl_encoder_write_dword(ctx->cbuf, num_tokens);
494 }
495 
virgl_emit_shader_streamout(struct virgl_context * ctx,const struct pipe_stream_output_info * so_info)496 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
497                                         const struct pipe_stream_output_info *so_info)
498 {
499    int num_outputs = 0;
500    int i;
501    uint32_t tmp;
502 
503    if (so_info)
504       num_outputs = so_info->num_outputs;
505 
506    virgl_encoder_write_dword(ctx->cbuf, num_outputs);
507    if (num_outputs) {
508       for (i = 0; i < 4; i++)
509          virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
510 
511       for (i = 0; i < so_info->num_outputs; i++) {
512          tmp =
513            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
514            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
515            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
516            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
517            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
518          virgl_encoder_write_dword(ctx->cbuf, tmp);
519          virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
520       }
521    }
522 }
523 
virgl_encode_shader_state(struct virgl_context * ctx,uint32_t handle,uint32_t type,const struct pipe_stream_output_info * so_info,uint32_t cs_req_local_mem,const struct tgsi_token * tokens)524 int virgl_encode_shader_state(struct virgl_context *ctx,
525                               uint32_t handle,
526                               uint32_t type,
527                               const struct pipe_stream_output_info *so_info,
528                               uint32_t cs_req_local_mem,
529                               const struct tgsi_token *tokens)
530 {
531    char *str, *sptr;
532    uint32_t shader_len, len;
533    bool bret;
534    int num_tokens = tgsi_num_tokens(tokens);
535    int str_total_size = 65536;
536    int retry_size = 1;
537    uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
538    bool first_pass;
539    str = CALLOC(1, str_total_size);
540    if (!str)
541       return -1;
542 
543    do {
544       int old_size;
545 
546       bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
547       if (bret == false) {
548          if (virgl_debug & VIRGL_DEBUG_VERBOSE)
549             debug_printf("Failed to translate shader in available space - trying again\n");
550          old_size = str_total_size;
551          str_total_size = 65536 * retry_size;
552          retry_size *= 2;
553          str = REALLOC(str, old_size, str_total_size);
554          if (!str)
555             return -1;
556       }
557    } while (bret == false && retry_size < 1024);
558 
559    if (bret == false)
560       return -1;
561 
562    if (virgl_debug & VIRGL_DEBUG_TGSI)
563       debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
564 
565    /* virglrenderer before addbd9c5058dcc9d561b20ab747aed58c53499da mis-counts
566     * the tokens needed for a BARRIER, so ask it to allocate some more space.
567     */
568    const char *barrier = str;
569    while ((barrier = strstr(barrier + 1, "BARRIER")))
570       num_tokens++;
571 
572    shader_len = strlen(str) + 1;
573 
574    left_bytes = shader_len;
575 
576    base_hdr_size = 5;
577    strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
578    first_pass = true;
579    sptr = str;
580    while (left_bytes) {
581       uint32_t length, offlen;
582       int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
583       if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
584          ctx->base.flush(&ctx->base, NULL, 0);
585 
586       thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
587 
588       length = MIN2(thispass, left_bytes);
589       len = ((length + 3) / 4) + hdr_len;
590 
591       if (first_pass)
592          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
593       else
594          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
595 
596       virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
597 
598       if (type == PIPE_SHADER_COMPUTE)
599          virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
600       else
601          virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
602 
603       virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
604 
605       sptr += length;
606       first_pass = false;
607       left_bytes -= length;
608    }
609 
610    FREE(str);
611    return 0;
612 }
613 
614 
virgl_encode_clear(struct virgl_context * ctx,unsigned buffers,const union pipe_color_union * color,double depth,unsigned stencil)615 int virgl_encode_clear(struct virgl_context *ctx,
616                       unsigned buffers,
617                       const union pipe_color_union *color,
618                       double depth, unsigned stencil)
619 {
620    int i;
621    uint64_t qword;
622 
623    STATIC_ASSERT(sizeof(qword) == sizeof(depth));
624    memcpy(&qword, &depth, sizeof(qword));
625 
626    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
627    virgl_encoder_write_dword(ctx->cbuf, buffers);
628    for (i = 0; i < 4; i++)
629       virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
630    virgl_encoder_write_qword(ctx->cbuf, qword);
631    virgl_encoder_write_dword(ctx->cbuf, stencil);
632    return 0;
633 }
634 
virgl_encode_clear_texture(struct virgl_context * ctx,struct virgl_resource * res,unsigned int level,const struct pipe_box * box,const void * data)635 int virgl_encode_clear_texture(struct virgl_context *ctx,
636                                struct virgl_resource *res,
637                                unsigned int level,
638                                const struct pipe_box *box,
639                                const void *data)
640 {
641    const struct util_format_description *desc = util_format_description(res->b.format);
642    unsigned block_bits = desc->block.bits;
643    uint32_t arr[4] = {0};
644    /* The spec describe <data> as a pointer to an array of between one
645     * and four components of texel data that will be used as the source
646     * for the constant fill value.
647     * Here, we are just copying the memory into <arr>. We do not try to
648     * re-create the data array. The host part will take care of interpreting
649     * the memory and applying the correct format to the clear call.
650     */
651    memcpy(&arr, data, block_bits / 8);
652 
653    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
654    virgl_encoder_write_res(ctx, res);
655    virgl_encoder_write_dword(ctx->cbuf, level);
656    virgl_encoder_write_dword(ctx->cbuf, box->x);
657    virgl_encoder_write_dword(ctx->cbuf, box->y);
658    virgl_encoder_write_dword(ctx->cbuf, box->z);
659    virgl_encoder_write_dword(ctx->cbuf, box->width);
660    virgl_encoder_write_dword(ctx->cbuf, box->height);
661    virgl_encoder_write_dword(ctx->cbuf, box->depth);
662    for (unsigned i = 0; i < 4; i++)
663       virgl_encoder_write_dword(ctx->cbuf, arr[i]);
664    return 0;
665 }
666 
virgl_encoder_set_framebuffer_state(struct virgl_context * ctx,const struct pipe_framebuffer_state * state)667 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
668                                        const struct pipe_framebuffer_state *state)
669 {
670    struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
671    int i;
672 
673    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
674    virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
675    virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
676    for (i = 0; i < state->nr_cbufs; i++) {
677       struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
678       virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
679    }
680 
681    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
682    if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
683       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
684       virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
685       virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
686    }
687    return 0;
688 }
689 
virgl_encoder_set_viewport_states(struct virgl_context * ctx,int start_slot,int num_viewports,const struct pipe_viewport_state * states)690 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
691                                       int start_slot,
692                                       int num_viewports,
693                                       const struct pipe_viewport_state *states)
694 {
695    int i,v;
696    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
697    virgl_encoder_write_dword(ctx->cbuf, start_slot);
698    for (v = 0; v < num_viewports; v++) {
699       for (i = 0; i < 3; i++)
700          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
701       for (i = 0; i < 3; i++)
702          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
703    }
704    return 0;
705 }
706 
virgl_encoder_create_vertex_elements(struct virgl_context * ctx,uint32_t handle,unsigned num_elements,const struct pipe_vertex_element * element)707 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
708                                         uint32_t handle,
709                                         unsigned num_elements,
710                                         const struct pipe_vertex_element *element)
711 {
712    int i;
713    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
714    virgl_encoder_write_dword(ctx->cbuf, handle);
715    for (i = 0; i < num_elements; i++) {
716       virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
717       virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
718       virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
719       virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
720    }
721    return 0;
722 }
723 
virgl_encoder_set_vertex_buffers(struct virgl_context * ctx,unsigned num_buffers,const struct pipe_vertex_buffer * buffers)724 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
725                                     unsigned num_buffers,
726                                     const struct pipe_vertex_buffer *buffers)
727 {
728    int i;
729    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
730    for (i = 0; i < num_buffers; i++) {
731       struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
732       virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
733       virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
734       virgl_encoder_write_res(ctx, res);
735    }
736    return 0;
737 }
738 
virgl_encoder_set_index_buffer(struct virgl_context * ctx,const struct virgl_indexbuf * ib)739 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
740                                   const struct virgl_indexbuf *ib)
741 {
742    int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
743    struct virgl_resource *res = NULL;
744    if (ib)
745       res = virgl_resource(ib->buffer);
746 
747    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
748    virgl_encoder_write_res(ctx, res);
749    if (ib) {
750       virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
751       virgl_encoder_write_dword(ctx->cbuf, ib->offset);
752    }
753    return 0;
754 }
755 
/* Encode a draw command.
 *
 * The command length selects the variant: the base layout, the tessellation
 * layout (adds patch vertices + draw id), or the indirect layout (adds the
 * indirect buffer parameters on top of the tessellation fields).  The dword
 * order below is fixed by the virgl protocol and must not be reordered.
 * Returns 0.
 */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draw)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   /* Patches need the extra tess fields; an indirect draw needs the full
    * indirect layout (which is a superset of the tess one). */
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (indirect && indirect->buffer)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, draw->start);
   virgl_encoder_write_dword(ctx->cbuf, draw->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   /* index_bias only applies to indexed draws. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_size ? draw->index_bias : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart ? info->restart_index : 0);
   /* Without valid bounds, advertise the widest possible index range. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->min_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->max_index : ~0);
   if (indirect && indirect->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, indirect->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, ctx->patch_vertices); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, drawid_offset); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
800 
virgl_encoder_create_surface_common(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_surface * templat)801 static int virgl_encoder_create_surface_common(struct virgl_context *ctx,
802                                                uint32_t handle,
803                                                struct virgl_resource *res,
804                                                const struct pipe_surface *templat)
805 {
806    virgl_encoder_write_dword(ctx->cbuf, handle);
807    virgl_encoder_write_res(ctx, res);
808    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));
809 
810    assert(templat->texture->target != PIPE_BUFFER);
811    virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
812    virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
813 
814    return 0;
815 }
816 
virgl_encoder_create_surface(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_surface * templat)817 int virgl_encoder_create_surface(struct virgl_context *ctx,
818                                  uint32_t handle,
819                                  struct virgl_resource *res,
820                                  const struct pipe_surface *templat)
821 {
822    if (templat->nr_samples > 0) {
823       ASSERTED struct virgl_screen *rs = virgl_screen(ctx->base.screen);
824       assert(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA);
825 
826       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_MSAA_SURFACE, VIRGL_OBJ_MSAA_SURFACE_SIZE));
827       virgl_encoder_create_surface_common(ctx, handle, res, templat);
828       virgl_encoder_write_dword(ctx->cbuf, templat->nr_samples);
829    } else {
830       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
831       virgl_encoder_create_surface_common(ctx, handle, res, templat);
832    }
833 
834    return 0;
835 }
836 
virgl_encoder_create_so_target(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,unsigned buffer_offset,unsigned buffer_size)837 int virgl_encoder_create_so_target(struct virgl_context *ctx,
838                                   uint32_t handle,
839                                   struct virgl_resource *res,
840                                   unsigned buffer_offset,
841                                   unsigned buffer_size)
842 {
843    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
844    virgl_encoder_write_dword(ctx->cbuf, handle);
845    virgl_encoder_write_res(ctx, res);
846    virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
847    virgl_encoder_write_dword(ctx->cbuf, buffer_size);
848    return 0;
849 }
850 
/* Selects how a transfer's strides are communicated to the host. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
859 
virgl_encoder_transfer3d_common(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * xfer,enum virgl_transfer3d_encode_stride encode_stride)860 static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
861                                             struct virgl_cmd_buf *buf,
862                                             struct virgl_transfer *xfer,
863                                             enum virgl_transfer3d_encode_stride encode_stride)
864 
865 {
866    struct pipe_transfer *transfer = &xfer->base;
867    unsigned stride;
868    unsigned layer_stride;
869 
870    if (encode_stride == virgl_transfer3d_explicit_stride) {
871       stride = transfer->stride;
872       layer_stride = transfer->layer_stride;
873    } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
874       stride = 0;
875       layer_stride = 0;
876    } else {
877       assert(!"Invalid virgl_transfer3d_encode_stride value");
878    }
879 
880    /* We cannot use virgl_encoder_emit_resource with transfer->resource here
881     * because transfer->resource might have a different virgl_hw_res than what
882     * this transfer targets, which is saved in xfer->hw_res.
883     */
884    vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
885    virgl_encoder_write_dword(buf, transfer->level);
886    virgl_encoder_write_dword(buf, transfer->usage);
887    virgl_encoder_write_dword(buf, stride);
888    virgl_encoder_write_dword(buf, layer_stride);
889    virgl_encoder_write_dword(buf, transfer->box.x);
890    virgl_encoder_write_dword(buf, transfer->box.y);
891    virgl_encoder_write_dword(buf, transfer->box.z);
892    virgl_encoder_write_dword(buf, transfer->box.width);
893    virgl_encoder_write_dword(buf, transfer->box.height);
894    virgl_encoder_write_dword(buf, transfer->box.depth);
895 }
896 
/* Upload data to a resource inline in the command stream.
 *
 * The payload is split into multiple RESOURCE_INLINE_WRITE commands when it
 * does not fit in the command buffer, flushing between chunks.  Splitting
 * only works for one-dimensional boxes: the box is advanced along x by the
 * number of bytes written, so multi-row/multi-layer uploads that exceed the
 * buffer are rejected with an assert.
 * Returns 0.
 */
int virgl_encoder_inline_write(struct virgl_context *ctx,
                              struct virgl_resource *res,
                              unsigned level, unsigned usage,
                              const struct pipe_box *box,
                              const void *data, unsigned stride,
                              unsigned layer_stride)
{
   /* NOTE(review): size ignores box->depth; presumably callers only pass
    * depth == 1 boxes here — verify against call sites. */
   uint32_t size = (stride ? stride : box->width) * box->height;
   uint32_t length, thispass, left_bytes;
   struct virgl_transfer transfer;
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);

   /* Build a throwaway transfer describing the destination region; the
    * hw_res is taken directly from the resource. */
   transfer.base.resource = &res->b;
   transfer.hw_res = res->hw_res;
   transfer.base.level = level;
   transfer.base.usage = usage;
   transfer.base.box = *box;

   /* 11 header dwords (see virgl_encoder_transfer3d_common) plus the
    * payload rounded up to whole dwords. */
   length = 11 + (size + 3) / 4;
   if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
      if (box->height > 1 || box->depth > 1) {
         debug_printf("inline transfer failed due to multi dimensions and too large\n");
         assert(0);
      }
   }

   left_bytes = size;
   while (left_bytes) {
      /* Make room for at least the 12-dword header before encoding. */
      if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
         ctx->base.flush(&ctx->base, NULL, 0);

      /* Bytes of payload that fit in the remaining command buffer space. */
      thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;

      length = MIN2(thispass, left_bytes);

      /* The chunk's box covers 'length' bytes starting at the current x. */
      transfer.base.box.width = length;
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
      virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
                                      virgl_transfer3d_host_inferred_stride);
      virgl_encoder_write_block(ctx->cbuf, data, length);
      left_bytes -= length;
      transfer.base.box.x += length;
      data += length;
   }
   return 0;
}
943 
/* Stub: flushing the front buffer currently encodes nothing.
 * The commented-out lines show the intended (unimplemented) command. */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                   struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
951 
virgl_encode_sampler_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_sampler_state * state)952 int virgl_encode_sampler_state(struct virgl_context *ctx,
953                               uint32_t handle,
954                               const struct pipe_sampler_state *state)
955 {
956    uint32_t tmp;
957    int i;
958    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
959    virgl_encoder_write_dword(ctx->cbuf, handle);
960 
961    tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
962       VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
963       VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
964       VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
965       VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
966       VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
967       VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
968       VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
969       VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map) |
970       VIRGL_OBJ_SAMPLE_STATE_S0_MAX_ANISOTROPY((int)(state->max_anisotropy));
971 
972    virgl_encoder_write_dword(ctx->cbuf, tmp);
973    virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
974    virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
975    virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
976    for (i = 0; i <  4; i++)
977       virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
978    return 0;
979 }
980 
981 
/* Create a sampler-view object.
 *
 * Buffer resources encode a first/last element range (offset divided by the
 * format's block size); textures encode packed layer and level ranges.  With
 * the TEXTURE_VIEW capability the view target is packed into the top byte of
 * the format dword.
 * Returns 0.
 */
int virgl_encode_sampler_view(struct virgl_context *ctx,
                             uint32_t handle,
                             struct virgl_resource *res,
                             const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   /* Newer hosts accept the view target in bits 24-31 of the format dword. */
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
     dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->b.target == PIPE_BUFFER) {
      /* First and last element of the buffer view, in format elements. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      if (res->metadata.plane) {
         /* Planar resources reuse the layer dword for the plane index;
          * only single-layer views are expected in that case. */
         assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
         virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
      } else {
         /* first_layer in the low 16 bits, last_layer in the high 16. */
         virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      }
      /* first_level in the low byte, last_level shifted into bits 8+. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   /* Component swizzles packed into a single dword. */
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}
1016 
virgl_encode_set_sampler_views(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_views,struct virgl_sampler_view ** views)1017 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
1018                                   uint32_t shader_type,
1019                                   uint32_t start_slot,
1020                                   uint32_t num_views,
1021                                   struct virgl_sampler_view **views)
1022 {
1023    int i;
1024    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
1025    virgl_encoder_write_dword(ctx->cbuf, shader_type);
1026    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1027    for (i = 0; i < num_views; i++) {
1028       uint32_t handle = views[i] ? views[i]->handle : 0;
1029       virgl_encoder_write_dword(ctx->cbuf, handle);
1030    }
1031    return 0;
1032 }
1033 
virgl_encode_bind_sampler_states(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_handles,uint32_t * handles)1034 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
1035                                     uint32_t shader_type,
1036                                     uint32_t start_slot,
1037                                     uint32_t num_handles,
1038                                     uint32_t *handles)
1039 {
1040    int i;
1041    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
1042    virgl_encoder_write_dword(ctx->cbuf, shader_type);
1043    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1044    for (i = 0; i < num_handles; i++)
1045       virgl_encoder_write_dword(ctx->cbuf, handles[i]);
1046    return 0;
1047 }
1048 
virgl_encoder_write_constant_buffer(struct virgl_context * ctx,uint32_t shader,uint32_t index,uint32_t size,const void * data)1049 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
1050                                        uint32_t shader,
1051                                        uint32_t index,
1052                                        uint32_t size,
1053                                        const void *data)
1054 {
1055    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
1056    virgl_encoder_write_dword(ctx->cbuf, shader);
1057    virgl_encoder_write_dword(ctx->cbuf, index);
1058    if (data)
1059       virgl_encoder_write_block(ctx->cbuf, data, size * 4);
1060    return 0;
1061 }
1062 
virgl_encoder_set_uniform_buffer(struct virgl_context * ctx,uint32_t shader,uint32_t index,uint32_t offset,uint32_t length,struct virgl_resource * res)1063 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
1064                                      uint32_t shader,
1065                                      uint32_t index,
1066                                      uint32_t offset,
1067                                      uint32_t length,
1068                                      struct virgl_resource *res)
1069 {
1070    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
1071    virgl_encoder_write_dword(ctx->cbuf, shader);
1072    virgl_encoder_write_dword(ctx->cbuf, index);
1073    virgl_encoder_write_dword(ctx->cbuf, offset);
1074    virgl_encoder_write_dword(ctx->cbuf, length);
1075    virgl_encoder_write_res(ctx, res);
1076    return 0;
1077 }
1078 
1079 
virgl_encoder_set_stencil_ref(struct virgl_context * ctx,const struct pipe_stencil_ref * ref)1080 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
1081                                  const struct pipe_stencil_ref *ref)
1082 {
1083    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
1084    virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
1085    return 0;
1086 }
1087 
virgl_encoder_set_blend_color(struct virgl_context * ctx,const struct pipe_blend_color * color)1088 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
1089                                  const struct pipe_blend_color *color)
1090 {
1091    int i;
1092    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
1093    for (i = 0; i < 4; i++)
1094       virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
1095    return 0;
1096 }
1097 
virgl_encoder_set_scissor_state(struct virgl_context * ctx,unsigned start_slot,int num_scissors,const struct pipe_scissor_state * ss)1098 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
1099                                     unsigned start_slot,
1100                                     int num_scissors,
1101                                     const struct pipe_scissor_state *ss)
1102 {
1103    int i;
1104    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
1105    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1106    for (i = 0; i < num_scissors; i++) {
1107       virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
1108       virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
1109    }
1110    return 0;
1111 }
1112 
virgl_encoder_set_polygon_stipple(struct virgl_context * ctx,const struct pipe_poly_stipple * ps)1113 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
1114                                       const struct pipe_poly_stipple *ps)
1115 {
1116    int i;
1117    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
1118    for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
1119       virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
1120    }
1121 }
1122 
virgl_encoder_set_sample_mask(struct virgl_context * ctx,unsigned sample_mask)1123 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
1124                                   unsigned sample_mask)
1125 {
1126    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
1127    virgl_encoder_write_dword(ctx->cbuf, sample_mask);
1128 }
1129 
virgl_encoder_set_min_samples(struct virgl_context * ctx,unsigned min_samples)1130 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
1131                                   unsigned min_samples)
1132 {
1133    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
1134    virgl_encoder_write_dword(ctx->cbuf, min_samples);
1135 }
1136 
virgl_encoder_set_clip_state(struct virgl_context * ctx,const struct pipe_clip_state * clip)1137 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
1138                                  const struct pipe_clip_state *clip)
1139 {
1140    int i, j;
1141    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
1142    for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
1143       for (j = 0; j < 4; j++) {
1144          virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
1145       }
1146    }
1147 }
1148 
virgl_encode_resource_copy_region(struct virgl_context * ctx,struct virgl_resource * dst_res,unsigned dst_level,unsigned dstx,unsigned dsty,unsigned dstz,struct virgl_resource * src_res,unsigned src_level,const struct pipe_box * src_box)1149 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
1150                                      struct virgl_resource *dst_res,
1151                                      unsigned dst_level,
1152                                      unsigned dstx, unsigned dsty, unsigned dstz,
1153                                      struct virgl_resource *src_res,
1154                                      unsigned src_level,
1155                                      const struct pipe_box *src_box)
1156 {
1157    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
1158    virgl_encoder_write_res(ctx, dst_res);
1159    virgl_encoder_write_dword(ctx->cbuf, dst_level);
1160    virgl_encoder_write_dword(ctx->cbuf, dstx);
1161    virgl_encoder_write_dword(ctx->cbuf, dsty);
1162    virgl_encoder_write_dword(ctx->cbuf, dstz);
1163    virgl_encoder_write_res(ctx, src_res);
1164    virgl_encoder_write_dword(ctx->cbuf, src_level);
1165    virgl_encoder_write_dword(ctx->cbuf, src_box->x);
1166    virgl_encoder_write_dword(ctx->cbuf, src_box->y);
1167    virgl_encoder_write_dword(ctx->cbuf, src_box->z);
1168    virgl_encoder_write_dword(ctx->cbuf, src_box->width);
1169    virgl_encoder_write_dword(ctx->cbuf, src_box->height);
1170    virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
1171    return 0;
1172 }
1173 
virgl_encode_blit(struct virgl_context * ctx,struct virgl_resource * dst_res,struct virgl_resource * src_res,const struct pipe_blit_info * blit)1174 int virgl_encode_blit(struct virgl_context *ctx,
1175                      struct virgl_resource *dst_res,
1176                      struct virgl_resource *src_res,
1177                      const struct pipe_blit_info *blit)
1178 {
1179    uint32_t tmp;
1180    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
1181    tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
1182       VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
1183       VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
1184       VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
1185       VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
1186    virgl_encoder_write_dword(ctx->cbuf, tmp);
1187    virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
1188    virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
1189 
1190    virgl_encoder_write_res(ctx, dst_res);
1191    virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
1192    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
1193    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
1194    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
1195    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
1196    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
1197    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
1198    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);
1199 
1200    virgl_encoder_write_res(ctx, src_res);
1201    virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
1202    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
1203    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
1204    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
1205    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
1206    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
1207    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
1208    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
1209    return 0;
1210 }
1211 
/* Create a query object; query_type lives in the low 16 bits and
 * query_index in the high 16 of the second dword.  The result is written
 * by the host into 'res' at 'offset'. */
int virgl_encoder_create_query(struct virgl_context *ctx,
                              uint32_t handle,
                              uint query_type,
                              uint query_index,
                              struct virgl_resource *res,
                              uint32_t offset)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(buf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
1226 
virgl_encoder_begin_query(struct virgl_context * ctx,uint32_t handle)1227 int virgl_encoder_begin_query(struct virgl_context *ctx,
1228                              uint32_t handle)
1229 {
1230    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
1231    virgl_encoder_write_dword(ctx->cbuf, handle);
1232    return 0;
1233 }
1234 
virgl_encoder_end_query(struct virgl_context * ctx,uint32_t handle)1235 int virgl_encoder_end_query(struct virgl_context *ctx,
1236                            uint32_t handle)
1237 {
1238    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
1239    virgl_encoder_write_dword(ctx->cbuf, handle);
1240    return 0;
1241 }
1242 
/* Ask the host to write back the result of a query; 'wait' requests a
 * blocking readback. */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                  uint32_t handle, boolean wait)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, wait ? 1 : 0);
   return 0;
}
1251 
/* Set (or clear, with handle 0) conditional rendering based on a query. */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                  uint32_t handle, boolean condition,
                                  enum pipe_render_cond_flag mode)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, condition);
   virgl_encoder_write_dword(buf, mode);
   return 0;
}
1262 
virgl_encoder_set_so_targets(struct virgl_context * ctx,unsigned num_targets,struct pipe_stream_output_target ** targets,unsigned append_bitmask)1263 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
1264                                 unsigned num_targets,
1265                                 struct pipe_stream_output_target **targets,
1266                                 unsigned append_bitmask)
1267 {
1268    int i;
1269 
1270    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
1271    virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
1272    for (i = 0; i < num_targets; i++) {
1273       struct virgl_so_target *tg = virgl_so_target(targets[i]);
1274       virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
1275    }
1276    return 0;
1277 }
1278 
1279 
virgl_encoder_set_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1280 int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1281 {
1282    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
1283    virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1284    return 0;
1285 }
1286 
virgl_encoder_create_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1287 int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1288 {
1289    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
1290    virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1291    return 0;
1292 }
1293 
virgl_encoder_destroy_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1294 int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1295 {
1296    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
1297    virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1298    return 0;
1299 }
1300 
virgl_encode_link_shader(struct virgl_context * ctx,uint32_t * handles)1301 int virgl_encode_link_shader(struct virgl_context *ctx, uint32_t *handles)
1302 {
1303    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LINK_SHADER, 0, VIRGL_LINK_SHADER_SIZE));
1304    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_VERTEX]);
1305    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_FRAGMENT]);
1306    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_GEOMETRY]);
1307    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_CTRL]);
1308    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_EVAL]);
1309    virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_COMPUTE]);
1310    return 0;
1311 }
1312 
/* Encode a BIND_SHADER command binding object `handle` to stage `type`.
 * Payload: handle dword, then shader-type dword. Always returns 0. */
int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   return 0;
}
1321 
virgl_encode_set_tess_state(struct virgl_context * ctx,const float outer[4],const float inner[2])1322 int virgl_encode_set_tess_state(struct virgl_context *ctx,
1323                                 const float outer[4],
1324                                 const float inner[2])
1325 {
1326    int i;
1327    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
1328    for (i = 0; i < 4; i++)
1329       virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
1330    for (i = 0; i < 2; i++)
1331       virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
1332    return 0;
1333 }
1334 
virgl_encode_set_shader_buffers(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1335 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
1336                                     enum pipe_shader_type shader,
1337                                     unsigned start_slot, unsigned count,
1338                                     const struct pipe_shader_buffer *buffers)
1339 {
1340    int i;
1341    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
1342 
1343    virgl_encoder_write_dword(ctx->cbuf, shader);
1344    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1345    for (i = 0; i < count; i++) {
1346       if (buffers && buffers[i].buffer) {
1347          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1348          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1349          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1350          virgl_encoder_write_res(ctx, res);
1351 
1352          util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1353                buffers[i].buffer_offset + buffers[i].buffer_size);
1354          virgl_resource_dirty(res, 0);
1355       } else {
1356          virgl_encoder_write_dword(ctx->cbuf, 0);
1357          virgl_encoder_write_dword(ctx->cbuf, 0);
1358          virgl_encoder_write_dword(ctx->cbuf, 0);
1359       }
1360    }
1361    return 0;
1362 }
1363 
virgl_encode_set_hw_atomic_buffers(struct virgl_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1364 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
1365                                        unsigned start_slot, unsigned count,
1366                                        const struct pipe_shader_buffer *buffers)
1367 {
1368    int i;
1369    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
1370 
1371    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1372    for (i = 0; i < count; i++) {
1373       if (buffers && buffers[i].buffer) {
1374          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1375          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1376          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1377          virgl_encoder_write_res(ctx, res);
1378 
1379          util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1380                buffers[i].buffer_offset + buffers[i].buffer_size);
1381          virgl_resource_dirty(res, 0);
1382       } else {
1383          virgl_encoder_write_dword(ctx->cbuf, 0);
1384          virgl_encoder_write_dword(ctx->cbuf, 0);
1385          virgl_encoder_write_dword(ctx->cbuf, 0);
1386       }
1387    }
1388    return 0;
1389 }
1390 
virgl_encode_set_shader_images(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_image_view * images)1391 int virgl_encode_set_shader_images(struct virgl_context *ctx,
1392                                    enum pipe_shader_type shader,
1393                                    unsigned start_slot, unsigned count,
1394                                    const struct pipe_image_view *images)
1395 {
1396    int i;
1397    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1398 
1399    virgl_encoder_write_dword(ctx->cbuf, shader);
1400    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1401    for (i = 0; i < count; i++) {
1402       if (images && images[i].resource) {
1403          struct virgl_resource *res = virgl_resource(images[i].resource);
1404          virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
1405          virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1406          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1407          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1408          virgl_encoder_write_res(ctx, res);
1409 
1410          if (res->b.target == PIPE_BUFFER) {
1411             util_range_add(&res->b, &res->valid_buffer_range, images[i].u.buf.offset,
1412                   images[i].u.buf.offset + images[i].u.buf.size);
1413          }
1414          virgl_resource_dirty(res, images[i].u.tex.level);
1415       } else {
1416          virgl_encoder_write_dword(ctx->cbuf, 0);
1417          virgl_encoder_write_dword(ctx->cbuf, 0);
1418          virgl_encoder_write_dword(ctx->cbuf, 0);
1419          virgl_encoder_write_dword(ctx->cbuf, 0);
1420          virgl_encoder_write_dword(ctx->cbuf, 0);
1421       }
1422    }
1423    return 0;
1424 }
1425 
/* Encode a MEMORY_BARRIER command; the single payload dword carries the
 * barrier flag bits. Always returns 0. */
int virgl_encode_memory_barrier(struct virgl_context *ctx,
                                unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}
1433 
/* Encode a LAUNCH_GRID (compute dispatch) command.
 *
 * Payload order: the three block (workgroup) sizes, the three grid
 * counts, the indirect-dispatch resource handle (0 when dispatching
 * directly), and finally the byte offset into that indirect buffer.
 * The offset dword is written even for direct dispatches so the payload
 * size stays VIRGL_LAUNCH_GRID_SIZE. Always returns 0. */
int virgl_encode_launch_grid(struct virgl_context *ctx,
                             const struct pipe_grid_info *grid_info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
   if (grid_info->indirect) {
      struct virgl_resource *res = virgl_resource(grid_info->indirect);
      virgl_encoder_write_res(ctx, res);
   } else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
   return 0;
}
1452 
/* Encode a TEXTURE_BARRIER command; the single payload dword carries the
 * barrier flag bits. Always returns 0. */
int virgl_encode_texture_barrier(struct virgl_context *ctx,
                                 unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}
1460 
virgl_encode_host_debug_flagstring(struct virgl_context * ctx,const char * flagstring)1461 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1462                                        const char *flagstring)
1463 {
1464    unsigned long slen = strlen(flagstring) + 1;
1465    uint32_t sslen;
1466    uint32_t string_length;
1467 
1468    if (!slen)
1469       return 0;
1470 
1471    if (slen > 4 * 0xffff) {
1472       debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1473       slen = 4 * 0xffff;
1474    }
1475 
1476    sslen = (uint32_t )(slen + 3) / 4;
1477    string_length = (uint32_t)MIN2(sslen * 4, slen);
1478 
1479    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1480    virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1481    return 0;
1482 }
1483 
/* Encode a SET_TWEAKS command: the tweak id followed by its value.
 * Always returns 0. */
int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, tweak);
   virgl_encoder_write_dword(ctx->cbuf, value);
   return 0;
}
1491 
1492 
/* Encode a GET_QUERY_RESULT_QBO command asking the host to write the
 * result of query `handle` into buffer object `res`.
 *
 * Payload: query handle, destination resource handle, wait flag
 * (1 = block until the result is available, 0 = write when ready),
 * result type, byte offset into the buffer, and the query index.
 * Always returns 0. */
int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, boolean wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   virgl_encoder_write_dword(ctx->cbuf, result_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, index);
   return 0;
}
1509 
virgl_encode_transfer(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * trans,uint32_t direction)1510 void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
1511                            struct virgl_transfer *trans, uint32_t direction)
1512 {
1513    uint32_t command;
1514    struct virgl_resource *vres = virgl_resource(trans->base.resource);
1515    enum virgl_transfer3d_encode_stride stride_type =
1516         virgl_transfer3d_host_inferred_stride;
1517 
1518    if (trans->base.box.depth == 1 && trans->base.level == 0 &&
1519        trans->base.resource->target == PIPE_TEXTURE_2D &&
1520        vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
1521       stride_type = virgl_transfer3d_explicit_stride;
1522 
1523    command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
1524    virgl_encoder_write_dword(buf, command);
1525    virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
1526    virgl_encoder_write_dword(buf, trans->offset);
1527    virgl_encoder_write_dword(buf, direction);
1528 }
1529 
/* Encode a COPY_TRANSFER3D command: a transfer whose data comes from (or
 * goes to) a separate staging resource (trans->copy_src_hw_res) rather
 * than the transfer's own backing store.
 *
 * The final payload dword packs flag bits: bit 0 is always-synchronized,
 * bit 1 selects host-to-guest direction — but the direction bit is only
 * set when the host advertises both-directions support; otherwise 0
 * (transfer-to-host) is assumed. */
void virgl_encode_copy_transfer(struct virgl_context *ctx,
                                struct virgl_transfer *trans)
{
   uint32_t command;
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);
   /* set always synchronized to 1, second bit is used for direction */
   uint32_t direction_and_synchronized = VIRGL_COPY_TRANSFER3D_FLAGS_SYNCHRONIZED;

   if (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) {
      if (trans->direction == VIRGL_TRANSFER_TO_HOST) {
         // do nothing, as 0 means transfer to host
      } else if (trans->direction == VIRGL_TRANSFER_FROM_HOST) {
         direction_and_synchronized |= VIRGL_COPY_TRANSFER3D_FLAGS_READ_FROM_HOST;
      } else {
         // something wrong happened here
         assert(0);
      }
   }
   assert(trans->copy_src_hw_res);
   command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);

   virgl_encoder_write_cmd_dword(ctx, command);
   /* Copy transfers need to explicitly specify the stride, since it may differ
    * from the image stride.
    */
   virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
   vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
   virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
   virgl_encoder_write_dword(ctx->cbuf, direction_and_synchronized);
}
1560 
virgl_encode_end_transfers(struct virgl_cmd_buf * buf)1561 void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
1562 {
1563    uint32_t command, diff;
1564    diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
1565    if (diff) {
1566       command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
1567       virgl_encoder_write_dword(buf, command);
1568    }
1569 }
1570 
/* Encode a GET_MEMORY_INFO command; the single payload dword is the
 * handle of the resource the host should write the info into. */
void virgl_encode_get_memory_info(struct virgl_context *ctx, struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_MEMORY_INFO, 0, 1));
   virgl_encoder_write_res(ctx, res);
}
1576 
virgl_encode_emit_string_marker(struct virgl_context * ctx,const char * message,int len)1577 void virgl_encode_emit_string_marker(struct virgl_context *ctx,
1578                                      const char *message, int len)
1579 {
1580    /* len is guaranteed to be non-negative but be defensive */
1581    assert(len >= 0);
1582    if (len <= 0)
1583       return;
1584 
1585    if (len > 4 * 0xffff) {
1586       debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1587       len = 4 * 0xffff;
1588    }
1589 
1590    uint32_t buf_len = (uint32_t )(len + 3) / 4 + 1;
1591    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_EMIT_STRING_MARKER, 0, buf_len));
1592    virgl_encoder_write_dword(ctx->cbuf, len);
1593    virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)message, len);
1594 }
1595