1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virtio-gpu/virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
41
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
43
/* Conversion table from gallium's pipe_format enum to the virgl protocol's
 * virgl_formats enum.  Entries use designated initializers keyed on the
 * PIPE_FORMAT_* value (see CONV_FORMAT above), so any pipe format that is
 * not listed here implicitly maps to 0, i.e. VIRGL_FORMAT_NONE;
 * pipe_to_virgl_format() emits a debug warning for such formats.
 */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(R8G8_SRGB)
   CONV_FORMAT(ETC1_RGB8)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
   CONV_FORMAT(ASTC_4x4)
   CONV_FORMAT(ASTC_5x4)
   CONV_FORMAT(ASTC_5x5)
   CONV_FORMAT(ASTC_6x5)
   CONV_FORMAT(ASTC_6x6)
   CONV_FORMAT(ASTC_8x5)
   CONV_FORMAT(ASTC_8x6)
   CONV_FORMAT(ASTC_8x8)
   CONV_FORMAT(ASTC_10x5)
   CONV_FORMAT(ASTC_10x6)
   CONV_FORMAT(ASTC_10x8)
   CONV_FORMAT(ASTC_10x10)
   CONV_FORMAT(ASTC_12x10)
   CONV_FORMAT(ASTC_12x12)
   CONV_FORMAT(ASTC_4x4_SRGB)
   CONV_FORMAT(ASTC_5x4_SRGB)
   CONV_FORMAT(ASTC_5x5_SRGB)
   CONV_FORMAT(ASTC_6x5_SRGB)
   CONV_FORMAT(ASTC_6x6_SRGB)
   CONV_FORMAT(ASTC_8x5_SRGB)
   CONV_FORMAT(ASTC_8x6_SRGB)
   CONV_FORMAT(ASTC_8x8_SRGB )
   CONV_FORMAT(ASTC_10x5_SRGB)
   CONV_FORMAT(ASTC_10x6_SRGB)
   CONV_FORMAT(ASTC_10x8_SRGB)
   CONV_FORMAT(ASTC_10x10_SRGB)
   CONV_FORMAT(ASTC_12x10_SRGB)
   CONV_FORMAT(ASTC_12x12_SRGB)
};
292
pipe_to_virgl_format(enum pipe_format format)293 enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
294 {
295 enum virgl_formats vformat = virgl_formats_conv_table[format];
296 if (format != PIPE_FORMAT_NONE && !vformat)
297 debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
298 return vformat;
299 }
300
virgl_encoder_write_cmd_dword(struct virgl_context * ctx,uint32_t dword)301 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
302 uint32_t dword)
303 {
304 int len = (dword >> 16);
305
306 if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
307 ctx->base.flush(&ctx->base, NULL, 0);
308
309 virgl_encoder_write_dword(ctx->cbuf, dword);
310 return 0;
311 }
312
virgl_encoder_emit_resource(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_resource * res)313 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
314 struct virgl_cmd_buf *buf,
315 struct virgl_resource *res)
316 {
317 struct virgl_winsys *vws = vs->vws;
318 if (res && res->hw_res)
319 vws->emit_res(vws, buf, res->hw_res, TRUE);
320 else {
321 virgl_encoder_write_dword(buf, 0);
322 }
323 }
324
virgl_encoder_write_res(struct virgl_context * ctx,struct virgl_resource * res)325 static void virgl_encoder_write_res(struct virgl_context *ctx,
326 struct virgl_resource *res)
327 {
328 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
329 virgl_encoder_emit_resource(vs, ctx->cbuf, res);
330 }
331
/* Encode a BIND_OBJECT command: bind the object identified by handle as the
 * current object of the given type (VIRGL_OBJECT_*).  Always returns 0.
 */
int virgl_encode_bind_object(struct virgl_context *ctx,
                             uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
339
/* Encode a DESTROY_OBJECT command: destroy the host-side object identified
 * by handle of the given type (VIRGL_OBJECT_*).  Always returns 0.
 */
int virgl_encode_delete_object(struct virgl_context *ctx,
                               uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
347
virgl_encode_blend_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_blend_state * blend_state)348 int virgl_encode_blend_state(struct virgl_context *ctx,
349 uint32_t handle,
350 const struct pipe_blend_state *blend_state)
351 {
352 uint32_t tmp;
353 int i;
354
355 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
356 virgl_encoder_write_dword(ctx->cbuf, handle);
357
358 tmp =
359 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
360 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
361 VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
362 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
363 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
364
365 virgl_encoder_write_dword(ctx->cbuf, tmp);
366
367 tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
368 virgl_encoder_write_dword(ctx->cbuf, tmp);
369
370 for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
371 /* We use alpha src factor to pass the advanced blend equation value
372 * to the host. By doing so, we don't have to change the protocol.
373 */
374 uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
375 ? blend_state->advanced_blend_func
376 : blend_state->rt[i].alpha_src_factor;
377 tmp =
378 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
379 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
380 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
381 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
382 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
383 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
384 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
385 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
386 virgl_encoder_write_dword(ctx->cbuf, tmp);
387 }
388 return 0;
389 }
390
virgl_encode_dsa_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_depth_stencil_alpha_state * dsa_state)391 int virgl_encode_dsa_state(struct virgl_context *ctx,
392 uint32_t handle,
393 const struct pipe_depth_stencil_alpha_state *dsa_state)
394 {
395 uint32_t tmp;
396 int i;
397 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
398 virgl_encoder_write_dword(ctx->cbuf, handle);
399
400 tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth_enabled) |
401 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth_writemask) |
402 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth_func) |
403 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha_enabled) |
404 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha_func);
405 virgl_encoder_write_dword(ctx->cbuf, tmp);
406
407 for (i = 0; i < 2; i++) {
408 tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
409 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
410 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
411 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
412 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
413 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
414 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
415 virgl_encoder_write_dword(ctx->cbuf, tmp);
416 }
417
418 virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha_ref_value));
419 return 0;
420 }
/* Encode a CREATE_OBJECT(RASTERIZER) command describing the gallium
 * rasterizer state.  Layout: header, handle, then protocol dwords S0-S7.
 * S0 packs all the boolean/bitfield flags; floats (point size, line width,
 * polygon offset params) are sent as raw IEEE bit patterns via fui().
 * Always returns 0.
 */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: every flag-sized piece of rasterizer state in one dword. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
473
/* Emit the common header of a CREATE_OBJECT(SHADER) command: total command
 * length, shader handle, shader stage type, offset/length word (offlen) used
 * for multi-chunk uploads, and the TGSI token count.
 */
static void virgl_emit_shader_header(struct virgl_context *ctx,
                                     uint32_t handle, uint32_t len,
                                     uint32_t type, uint32_t offlen,
                                     uint32_t num_tokens)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   virgl_encoder_write_dword(ctx->cbuf, offlen);
   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
}
485
virgl_emit_shader_streamout(struct virgl_context * ctx,const struct pipe_stream_output_info * so_info)486 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
487 const struct pipe_stream_output_info *so_info)
488 {
489 int num_outputs = 0;
490 int i;
491 uint32_t tmp;
492
493 if (so_info)
494 num_outputs = so_info->num_outputs;
495
496 virgl_encoder_write_dword(ctx->cbuf, num_outputs);
497 if (num_outputs) {
498 for (i = 0; i < 4; i++)
499 virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
500
501 for (i = 0; i < so_info->num_outputs; i++) {
502 tmp =
503 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
504 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
505 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
506 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
507 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
508 virgl_encoder_write_dword(ctx->cbuf, tmp);
509 virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
510 }
511 }
512 }
513
virgl_encode_shader_state(struct virgl_context * ctx,uint32_t handle,uint32_t type,const struct pipe_stream_output_info * so_info,uint32_t cs_req_local_mem,const struct tgsi_token * tokens)514 int virgl_encode_shader_state(struct virgl_context *ctx,
515 uint32_t handle,
516 uint32_t type,
517 const struct pipe_stream_output_info *so_info,
518 uint32_t cs_req_local_mem,
519 const struct tgsi_token *tokens)
520 {
521 char *str, *sptr;
522 uint32_t shader_len, len;
523 bool bret;
524 int num_tokens = tgsi_num_tokens(tokens);
525 int str_total_size = 65536;
526 int retry_size = 1;
527 uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
528 bool first_pass;
529 str = CALLOC(1, str_total_size);
530 if (!str)
531 return -1;
532
533 do {
534 int old_size;
535
536 bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
537 if (bret == false) {
538 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
539 debug_printf("Failed to translate shader in available space - trying again\n");
540 old_size = str_total_size;
541 str_total_size = 65536 * retry_size;
542 retry_size *= 2;
543 str = REALLOC(str, old_size, str_total_size);
544 if (!str)
545 return -1;
546 }
547 } while (bret == false && retry_size < 1024);
548
549 if (bret == false)
550 return -1;
551
552 if (virgl_debug & VIRGL_DEBUG_TGSI)
553 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
554
555 shader_len = strlen(str) + 1;
556
557 left_bytes = shader_len;
558
559 base_hdr_size = 5;
560 strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
561 first_pass = true;
562 sptr = str;
563 while (left_bytes) {
564 uint32_t length, offlen;
565 int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
566 if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
567 ctx->base.flush(&ctx->base, NULL, 0);
568
569 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
570
571 length = MIN2(thispass, left_bytes);
572 len = ((length + 3) / 4) + hdr_len;
573
574 if (first_pass)
575 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
576 else
577 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
578
579 virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
580
581 if (type == PIPE_SHADER_COMPUTE)
582 virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
583 else
584 virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
585
586 virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
587
588 sptr += length;
589 first_pass = false;
590 left_bytes -= length;
591 }
592
593 FREE(str);
594 return 0;
595 }
596
597
virgl_encode_clear(struct virgl_context * ctx,unsigned buffers,const union pipe_color_union * color,double depth,unsigned stencil)598 int virgl_encode_clear(struct virgl_context *ctx,
599 unsigned buffers,
600 const union pipe_color_union *color,
601 double depth, unsigned stencil)
602 {
603 int i;
604 uint64_t qword;
605
606 STATIC_ASSERT(sizeof(qword) == sizeof(depth));
607 memcpy(&qword, &depth, sizeof(qword));
608
609 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
610 virgl_encoder_write_dword(ctx->cbuf, buffers);
611 for (i = 0; i < 4; i++)
612 virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
613 virgl_encoder_write_qword(ctx->cbuf, qword);
614 virgl_encoder_write_dword(ctx->cbuf, stencil);
615 return 0;
616 }
617
virgl_encode_clear_texture(struct virgl_context * ctx,struct virgl_resource * res,unsigned int level,const struct pipe_box * box,const void * data)618 int virgl_encode_clear_texture(struct virgl_context *ctx,
619 struct virgl_resource *res,
620 unsigned int level,
621 const struct pipe_box *box,
622 const void *data)
623 {
624 const struct util_format_description *desc = util_format_description(res->b.format);
625 unsigned block_bits = desc->block.bits;
626 uint32_t arr[4] = {0};
627 /* The spec describe <data> as a pointer to an array of between one
628 * and four components of texel data that will be used as the source
629 * for the constant fill value.
630 * Here, we are just copying the memory into <arr>. We do not try to
631 * re-create the data array. The host part will take care of interpreting
632 * the memory and applying the correct format to the clear call.
633 */
634 memcpy(&arr, data, block_bits / 8);
635
636 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
637 virgl_encoder_write_res(ctx, res);
638 virgl_encoder_write_dword(ctx->cbuf, level);
639 virgl_encoder_write_dword(ctx->cbuf, box->x);
640 virgl_encoder_write_dword(ctx->cbuf, box->y);
641 virgl_encoder_write_dword(ctx->cbuf, box->z);
642 virgl_encoder_write_dword(ctx->cbuf, box->width);
643 virgl_encoder_write_dword(ctx->cbuf, box->height);
644 virgl_encoder_write_dword(ctx->cbuf, box->depth);
645 for (unsigned i = 0; i < 4; i++)
646 virgl_encoder_write_dword(ctx->cbuf, arr[i]);
647 return 0;
648 }
649
virgl_encoder_set_framebuffer_state(struct virgl_context * ctx,const struct pipe_framebuffer_state * state)650 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
651 const struct pipe_framebuffer_state *state)
652 {
653 struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
654 int i;
655
656 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
657 virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
658 virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
659 for (i = 0; i < state->nr_cbufs; i++) {
660 struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
661 virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
662 }
663
664 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
665 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
666 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
667 virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
668 virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
669 }
670 return 0;
671 }
672
virgl_encoder_set_viewport_states(struct virgl_context * ctx,int start_slot,int num_viewports,const struct pipe_viewport_state * states)673 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
674 int start_slot,
675 int num_viewports,
676 const struct pipe_viewport_state *states)
677 {
678 int i,v;
679 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
680 virgl_encoder_write_dword(ctx->cbuf, start_slot);
681 for (v = 0; v < num_viewports; v++) {
682 for (i = 0; i < 3; i++)
683 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
684 for (i = 0; i < 3; i++)
685 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
686 }
687 return 0;
688 }
689
virgl_encoder_create_vertex_elements(struct virgl_context * ctx,uint32_t handle,unsigned num_elements,const struct pipe_vertex_element * element)690 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
691 uint32_t handle,
692 unsigned num_elements,
693 const struct pipe_vertex_element *element)
694 {
695 int i;
696 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
697 virgl_encoder_write_dword(ctx->cbuf, handle);
698 for (i = 0; i < num_elements; i++) {
699 virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
700 virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
701 virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
702 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
703 }
704 return 0;
705 }
706
virgl_encoder_set_vertex_buffers(struct virgl_context * ctx,unsigned num_buffers,const struct pipe_vertex_buffer * buffers)707 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
708 unsigned num_buffers,
709 const struct pipe_vertex_buffer *buffers)
710 {
711 int i;
712 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
713 for (i = 0; i < num_buffers; i++) {
714 struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
715 virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
716 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
717 virgl_encoder_write_res(ctx, res);
718 }
719 return 0;
720 }
721
virgl_encoder_set_index_buffer(struct virgl_context * ctx,const struct virgl_indexbuf * ib)722 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
723 const struct virgl_indexbuf *ib)
724 {
725 int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
726 struct virgl_resource *res = NULL;
727 if (ib)
728 res = virgl_resource(ib->buffer);
729
730 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
731 virgl_encoder_write_res(ctx, res);
732 if (ib) {
733 virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
734 virgl_encoder_write_dword(ctx->cbuf, ib->offset);
735 }
736 return 0;
737 }
738
/* Encode a DRAW_VBO command.  The command length depends on the draw kind:
 * base size for plain draws, extended for tessellation patches (adds patch
 * vertex count and draw id), and fully extended for indirect draws (adds
 * the indirect buffer parameters).  The dword order below is the wire
 * protocol layout and must not be reordered.  Always returns 0.
 */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draw)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (indirect && indirect->buffer)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, draw->start);
   virgl_encoder_write_dword(ctx->cbuf, draw->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size); /* indexed draw? */
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_size ? draw->index_bias : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart ? info->restart_index : 0);
   /* min/max index bounds; ~0 max means "unknown". */
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->min_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->max_index : ~0);
   if (indirect && indirect->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, indirect->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, ctx->patch_vertices); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, drawid_offset); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
783
/* Emit the payload shared by SURFACE and MSAA_SURFACE creation: handle,
 * resource reference, virgl format, mip level, and first/last layer packed
 * into one dword.  The caller has already written the command header.
 * Texture surfaces only (asserts the target is not PIPE_BUFFER).
 * Always returns 0.
 */
static int virgl_encoder_create_surface_common(struct virgl_context *ctx,
                                               uint32_t handle,
                                               struct virgl_resource *res,
                                               const struct pipe_surface *templat)
{
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));

   assert(templat->texture->target != PIPE_BUFFER);
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));

   return 0;
}
799
virgl_encoder_create_surface(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_surface * templat)800 int virgl_encoder_create_surface(struct virgl_context *ctx,
801 uint32_t handle,
802 struct virgl_resource *res,
803 const struct pipe_surface *templat)
804 {
805 if (templat->nr_samples > 0) {
806 ASSERTED struct virgl_screen *rs = virgl_screen(ctx->base.screen);
807 assert(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA);
808
809 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_MSAA_SURFACE, VIRGL_OBJ_MSAA_SURFACE_SIZE));
810 virgl_encoder_create_surface_common(ctx, handle, res, templat);
811 virgl_encoder_write_dword(ctx->cbuf, templat->nr_samples);
812 } else {
813 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
814 virgl_encoder_create_surface_common(ctx, handle, res, templat);
815 }
816
817 return 0;
818 }
819
virgl_encoder_create_so_target(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,unsigned buffer_offset,unsigned buffer_size)820 int virgl_encoder_create_so_target(struct virgl_context *ctx,
821 uint32_t handle,
822 struct virgl_resource *res,
823 unsigned buffer_offset,
824 unsigned buffer_size)
825 {
826 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
827 virgl_encoder_write_dword(ctx->cbuf, handle);
828 virgl_encoder_write_res(ctx, res);
829 virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
830 virgl_encoder_write_dword(ctx->cbuf, buffer_size);
831 return 0;
832 }
833
/* Selects how the stride/layer_stride dwords of a TRANSFER3D-style command
 * are filled in by virgl_encoder_transfer3d_common().
 */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
842
virgl_encoder_transfer3d_common(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * xfer,enum virgl_transfer3d_encode_stride encode_stride)843 static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
844 struct virgl_cmd_buf *buf,
845 struct virgl_transfer *xfer,
846 enum virgl_transfer3d_encode_stride encode_stride)
847
848 {
849 struct pipe_transfer *transfer = &xfer->base;
850 unsigned stride;
851 unsigned layer_stride;
852
853 if (encode_stride == virgl_transfer3d_explicit_stride) {
854 stride = transfer->stride;
855 layer_stride = transfer->layer_stride;
856 } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
857 stride = 0;
858 layer_stride = 0;
859 } else {
860 assert(!"Invalid virgl_transfer3d_encode_stride value");
861 }
862
863 /* We cannot use virgl_encoder_emit_resource with transfer->resource here
864 * because transfer->resource might have a different virgl_hw_res than what
865 * this transfer targets, which is saved in xfer->hw_res.
866 */
867 vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
868 virgl_encoder_write_dword(buf, transfer->level);
869 virgl_encoder_write_dword(buf, transfer->usage);
870 virgl_encoder_write_dword(buf, stride);
871 virgl_encoder_write_dword(buf, layer_stride);
872 virgl_encoder_write_dword(buf, transfer->box.x);
873 virgl_encoder_write_dword(buf, transfer->box.y);
874 virgl_encoder_write_dword(buf, transfer->box.z);
875 virgl_encoder_write_dword(buf, transfer->box.width);
876 virgl_encoder_write_dword(buf, transfer->box.height);
877 virgl_encoder_write_dword(buf, transfer->box.depth);
878 }
879
virgl_encoder_inline_write(struct virgl_context * ctx,struct virgl_resource * res,unsigned level,unsigned usage,const struct pipe_box * box,const void * data,unsigned stride,unsigned layer_stride)880 int virgl_encoder_inline_write(struct virgl_context *ctx,
881 struct virgl_resource *res,
882 unsigned level, unsigned usage,
883 const struct pipe_box *box,
884 const void *data, unsigned stride,
885 unsigned layer_stride)
886 {
887 uint32_t size = (stride ? stride : box->width) * box->height;
888 uint32_t length, thispass, left_bytes;
889 struct virgl_transfer transfer;
890 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
891
892 transfer.base.resource = &res->b;
893 transfer.hw_res = res->hw_res;
894 transfer.base.level = level;
895 transfer.base.usage = usage;
896 transfer.base.box = *box;
897
898 length = 11 + (size + 3) / 4;
899 if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
900 if (box->height > 1 || box->depth > 1) {
901 debug_printf("inline transfer failed due to multi dimensions and too large\n");
902 assert(0);
903 }
904 }
905
906 left_bytes = size;
907 while (left_bytes) {
908 if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
909 ctx->base.flush(&ctx->base, NULL, 0);
910
911 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
912
913 length = MIN2(thispass, left_bytes);
914
915 transfer.base.box.width = length;
916 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
917 virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
918 virgl_transfer3d_host_inferred_stride);
919 virgl_encoder_write_block(ctx->cbuf, data, length);
920 left_bytes -= length;
921 transfer.base.box.x += length;
922 data += length;
923 }
924 return 0;
925 }
926
/* Stub: flushing the frontbuffer is currently not encoded as a command.
 * The commented lines preserve the originally intended encoding.
 * Always returns 0 and emits nothing.
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
934
virgl_encode_sampler_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_sampler_state * state)935 int virgl_encode_sampler_state(struct virgl_context *ctx,
936 uint32_t handle,
937 const struct pipe_sampler_state *state)
938 {
939 uint32_t tmp;
940 int i;
941 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
942 virgl_encoder_write_dword(ctx->cbuf, handle);
943
944 tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
945 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
946 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
947 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
948 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
949 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
950 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
951 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
952 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map) |
953 VIRGL_OBJ_SAMPLE_STATE_S0_MAX_ANISOTROPY((int)(state->max_anisotropy));
954
955 virgl_encoder_write_dword(ctx->cbuf, tmp);
956 virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
957 virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
958 virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
959 for (i = 0; i < 4; i++)
960 virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
961 return 0;
962 }
963
964
virgl_encode_sampler_view(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_sampler_view * state)965 int virgl_encode_sampler_view(struct virgl_context *ctx,
966 uint32_t handle,
967 struct virgl_resource *res,
968 const struct pipe_sampler_view *state)
969 {
970 unsigned elem_size = util_format_get_blocksize(state->format);
971 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
972 uint32_t tmp;
973 uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
974 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
975 virgl_encoder_write_dword(ctx->cbuf, handle);
976 virgl_encoder_write_res(ctx, res);
977 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
978 dword_fmt_target |= (state->target << 24);
979 virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
980 if (res->b.target == PIPE_BUFFER) {
981 virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
982 virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
983 } else {
984 if (res->metadata.plane) {
985 debug_assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
986 virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
987 } else {
988 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
989 }
990 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
991 }
992 tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
993 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
994 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
995 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
996 virgl_encoder_write_dword(ctx->cbuf, tmp);
997 return 0;
998 }
999
virgl_encode_set_sampler_views(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_views,struct virgl_sampler_view ** views)1000 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
1001 uint32_t shader_type,
1002 uint32_t start_slot,
1003 uint32_t num_views,
1004 struct virgl_sampler_view **views)
1005 {
1006 int i;
1007 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
1008 virgl_encoder_write_dword(ctx->cbuf, shader_type);
1009 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1010 for (i = 0; i < num_views; i++) {
1011 uint32_t handle = views[i] ? views[i]->handle : 0;
1012 virgl_encoder_write_dword(ctx->cbuf, handle);
1013 }
1014 return 0;
1015 }
1016
virgl_encode_bind_sampler_states(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_handles,uint32_t * handles)1017 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
1018 uint32_t shader_type,
1019 uint32_t start_slot,
1020 uint32_t num_handles,
1021 uint32_t *handles)
1022 {
1023 int i;
1024 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
1025 virgl_encoder_write_dword(ctx->cbuf, shader_type);
1026 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1027 for (i = 0; i < num_handles; i++)
1028 virgl_encoder_write_dword(ctx->cbuf, handles[i]);
1029 return 0;
1030 }
1031
virgl_encoder_write_constant_buffer(struct virgl_context * ctx,uint32_t shader,uint32_t index,uint32_t size,const void * data)1032 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
1033 uint32_t shader,
1034 uint32_t index,
1035 uint32_t size,
1036 const void *data)
1037 {
1038 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
1039 virgl_encoder_write_dword(ctx->cbuf, shader);
1040 virgl_encoder_write_dword(ctx->cbuf, index);
1041 if (data)
1042 virgl_encoder_write_block(ctx->cbuf, data, size * 4);
1043 return 0;
1044 }
1045
virgl_encoder_set_uniform_buffer(struct virgl_context * ctx,uint32_t shader,uint32_t index,uint32_t offset,uint32_t length,struct virgl_resource * res)1046 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
1047 uint32_t shader,
1048 uint32_t index,
1049 uint32_t offset,
1050 uint32_t length,
1051 struct virgl_resource *res)
1052 {
1053 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
1054 virgl_encoder_write_dword(ctx->cbuf, shader);
1055 virgl_encoder_write_dword(ctx->cbuf, index);
1056 virgl_encoder_write_dword(ctx->cbuf, offset);
1057 virgl_encoder_write_dword(ctx->cbuf, length);
1058 virgl_encoder_write_res(ctx, res);
1059 return 0;
1060 }
1061
1062
virgl_encoder_set_stencil_ref(struct virgl_context * ctx,const struct pipe_stencil_ref * ref)1063 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
1064 const struct pipe_stencil_ref *ref)
1065 {
1066 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
1067 virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
1068 return 0;
1069 }
1070
virgl_encoder_set_blend_color(struct virgl_context * ctx,const struct pipe_blend_color * color)1071 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
1072 const struct pipe_blend_color *color)
1073 {
1074 int i;
1075 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
1076 for (i = 0; i < 4; i++)
1077 virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
1078 return 0;
1079 }
1080
virgl_encoder_set_scissor_state(struct virgl_context * ctx,unsigned start_slot,int num_scissors,const struct pipe_scissor_state * ss)1081 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
1082 unsigned start_slot,
1083 int num_scissors,
1084 const struct pipe_scissor_state *ss)
1085 {
1086 int i;
1087 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
1088 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1089 for (i = 0; i < num_scissors; i++) {
1090 virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
1091 virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
1092 }
1093 return 0;
1094 }
1095
virgl_encoder_set_polygon_stipple(struct virgl_context * ctx,const struct pipe_poly_stipple * ps)1096 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
1097 const struct pipe_poly_stipple *ps)
1098 {
1099 int i;
1100 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
1101 for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
1102 virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
1103 }
1104 }
1105
virgl_encoder_set_sample_mask(struct virgl_context * ctx,unsigned sample_mask)1106 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
1107 unsigned sample_mask)
1108 {
1109 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
1110 virgl_encoder_write_dword(ctx->cbuf, sample_mask);
1111 }
1112
virgl_encoder_set_min_samples(struct virgl_context * ctx,unsigned min_samples)1113 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
1114 unsigned min_samples)
1115 {
1116 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
1117 virgl_encoder_write_dword(ctx->cbuf, min_samples);
1118 }
1119
virgl_encoder_set_clip_state(struct virgl_context * ctx,const struct pipe_clip_state * clip)1120 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
1121 const struct pipe_clip_state *clip)
1122 {
1123 int i, j;
1124 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
1125 for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
1126 for (j = 0; j < 4; j++) {
1127 virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
1128 }
1129 }
1130 }
1131
virgl_encode_resource_copy_region(struct virgl_context * ctx,struct virgl_resource * dst_res,unsigned dst_level,unsigned dstx,unsigned dsty,unsigned dstz,struct virgl_resource * src_res,unsigned src_level,const struct pipe_box * src_box)1132 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
1133 struct virgl_resource *dst_res,
1134 unsigned dst_level,
1135 unsigned dstx, unsigned dsty, unsigned dstz,
1136 struct virgl_resource *src_res,
1137 unsigned src_level,
1138 const struct pipe_box *src_box)
1139 {
1140 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
1141 virgl_encoder_write_res(ctx, dst_res);
1142 virgl_encoder_write_dword(ctx->cbuf, dst_level);
1143 virgl_encoder_write_dword(ctx->cbuf, dstx);
1144 virgl_encoder_write_dword(ctx->cbuf, dsty);
1145 virgl_encoder_write_dword(ctx->cbuf, dstz);
1146 virgl_encoder_write_res(ctx, src_res);
1147 virgl_encoder_write_dword(ctx->cbuf, src_level);
1148 virgl_encoder_write_dword(ctx->cbuf, src_box->x);
1149 virgl_encoder_write_dword(ctx->cbuf, src_box->y);
1150 virgl_encoder_write_dword(ctx->cbuf, src_box->z);
1151 virgl_encoder_write_dword(ctx->cbuf, src_box->width);
1152 virgl_encoder_write_dword(ctx->cbuf, src_box->height);
1153 virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
1154 return 0;
1155 }
1156
virgl_encode_blit(struct virgl_context * ctx,struct virgl_resource * dst_res,struct virgl_resource * src_res,const struct pipe_blit_info * blit)1157 int virgl_encode_blit(struct virgl_context *ctx,
1158 struct virgl_resource *dst_res,
1159 struct virgl_resource *src_res,
1160 const struct pipe_blit_info *blit)
1161 {
1162 uint32_t tmp;
1163 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
1164 tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
1165 VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
1166 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
1167 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
1168 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
1169 virgl_encoder_write_dword(ctx->cbuf, tmp);
1170 virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
1171 virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
1172
1173 virgl_encoder_write_res(ctx, dst_res);
1174 virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
1175 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
1176 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
1177 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
1178 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
1179 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
1180 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
1181 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);
1182
1183 virgl_encoder_write_res(ctx, src_res);
1184 virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
1185 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
1186 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
1187 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
1188 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
1189 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
1190 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
1191 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
1192 return 0;
1193 }
1194
/* Create a query object.  Query type goes in the low 16 bits and the
 * query index in the high 16 bits of the second dword.
 */
int virgl_encoder_create_query(struct virgl_context *ctx,
                               uint32_t handle,
                               uint query_type,
                               uint query_index,
                               struct virgl_resource *res,
                               uint32_t offset)
{
   struct virgl_cmd_buf *out = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(out, handle);
   virgl_encoder_write_dword(out, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(out, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
1209
virgl_encoder_begin_query(struct virgl_context * ctx,uint32_t handle)1210 int virgl_encoder_begin_query(struct virgl_context *ctx,
1211 uint32_t handle)
1212 {
1213 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
1214 virgl_encoder_write_dword(ctx->cbuf, handle);
1215 return 0;
1216 }
1217
virgl_encoder_end_query(struct virgl_context * ctx,uint32_t handle)1218 int virgl_encoder_end_query(struct virgl_context *ctx,
1219 uint32_t handle)
1220 {
1221 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
1222 virgl_encoder_write_dword(ctx->cbuf, handle);
1223 return 0;
1224 }
1225
/* Request the query result; 'wait' asks the host to block until the
 * result is available.
 */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                   uint32_t handle, boolean wait)
{
   struct virgl_cmd_buf *out = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(out, handle);
   virgl_encoder_write_dword(out, wait ? 1 : 0);
   return 0;
}
1234
/* Set (or clear, with handle 0) conditional rendering on a query. */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                   uint32_t handle, boolean condition,
                                   enum pipe_render_cond_flag mode)
{
   struct virgl_cmd_buf *out = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(out, handle);
   virgl_encoder_write_dword(out, condition);
   virgl_encoder_write_dword(out, mode);
   return 0;
}
1245
virgl_encoder_set_so_targets(struct virgl_context * ctx,unsigned num_targets,struct pipe_stream_output_target ** targets,unsigned append_bitmask)1246 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
1247 unsigned num_targets,
1248 struct pipe_stream_output_target **targets,
1249 unsigned append_bitmask)
1250 {
1251 int i;
1252
1253 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
1254 virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
1255 for (i = 0; i < num_targets; i++) {
1256 struct virgl_so_target *tg = virgl_so_target(targets[i]);
1257 virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
1258 }
1259 return 0;
1260 }
1261
1262
virgl_encoder_set_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1263 int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1264 {
1265 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
1266 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1267 return 0;
1268 }
1269
virgl_encoder_create_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1270 int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1271 {
1272 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
1273 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1274 return 0;
1275 }
1276
virgl_encoder_destroy_sub_ctx(struct virgl_context * ctx,uint32_t sub_ctx_id)1277 int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1278 {
1279 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
1280 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1281 return 0;
1282 }
1283
virgl_encode_bind_shader(struct virgl_context * ctx,uint32_t handle,uint32_t type)1284 int virgl_encode_bind_shader(struct virgl_context *ctx,
1285 uint32_t handle, uint32_t type)
1286 {
1287 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
1288 virgl_encoder_write_dword(ctx->cbuf, handle);
1289 virgl_encoder_write_dword(ctx->cbuf, type);
1290 return 0;
1291 }
1292
virgl_encode_set_tess_state(struct virgl_context * ctx,const float outer[4],const float inner[2])1293 int virgl_encode_set_tess_state(struct virgl_context *ctx,
1294 const float outer[4],
1295 const float inner[2])
1296 {
1297 int i;
1298 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
1299 for (i = 0; i < 4; i++)
1300 virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
1301 for (i = 0; i < 2; i++)
1302 virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
1303 return 0;
1304 }
1305
virgl_encode_set_shader_buffers(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1306 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
1307 enum pipe_shader_type shader,
1308 unsigned start_slot, unsigned count,
1309 const struct pipe_shader_buffer *buffers)
1310 {
1311 int i;
1312 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
1313
1314 virgl_encoder_write_dword(ctx->cbuf, shader);
1315 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1316 for (i = 0; i < count; i++) {
1317 if (buffers && buffers[i].buffer) {
1318 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1319 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1320 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1321 virgl_encoder_write_res(ctx, res);
1322
1323 util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1324 buffers[i].buffer_offset + buffers[i].buffer_size);
1325 virgl_resource_dirty(res, 0);
1326 } else {
1327 virgl_encoder_write_dword(ctx->cbuf, 0);
1328 virgl_encoder_write_dword(ctx->cbuf, 0);
1329 virgl_encoder_write_dword(ctx->cbuf, 0);
1330 }
1331 }
1332 return 0;
1333 }
1334
virgl_encode_set_hw_atomic_buffers(struct virgl_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1335 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
1336 unsigned start_slot, unsigned count,
1337 const struct pipe_shader_buffer *buffers)
1338 {
1339 int i;
1340 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
1341
1342 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1343 for (i = 0; i < count; i++) {
1344 if (buffers && buffers[i].buffer) {
1345 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1346 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1347 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1348 virgl_encoder_write_res(ctx, res);
1349
1350 util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1351 buffers[i].buffer_offset + buffers[i].buffer_size);
1352 virgl_resource_dirty(res, 0);
1353 } else {
1354 virgl_encoder_write_dword(ctx->cbuf, 0);
1355 virgl_encoder_write_dword(ctx->cbuf, 0);
1356 virgl_encoder_write_dword(ctx->cbuf, 0);
1357 }
1358 }
1359 return 0;
1360 }
1361
virgl_encode_set_shader_images(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_image_view * images)1362 int virgl_encode_set_shader_images(struct virgl_context *ctx,
1363 enum pipe_shader_type shader,
1364 unsigned start_slot, unsigned count,
1365 const struct pipe_image_view *images)
1366 {
1367 int i;
1368 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1369
1370 virgl_encoder_write_dword(ctx->cbuf, shader);
1371 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1372 for (i = 0; i < count; i++) {
1373 if (images && images[i].resource) {
1374 struct virgl_resource *res = virgl_resource(images[i].resource);
1375 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
1376 virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1377 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1378 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1379 virgl_encoder_write_res(ctx, res);
1380
1381 if (res->b.target == PIPE_BUFFER) {
1382 util_range_add(&res->b, &res->valid_buffer_range, images[i].u.buf.offset,
1383 images[i].u.buf.offset + images[i].u.buf.size);
1384 }
1385 virgl_resource_dirty(res, images[i].u.tex.level);
1386 } else {
1387 virgl_encoder_write_dword(ctx->cbuf, 0);
1388 virgl_encoder_write_dword(ctx->cbuf, 0);
1389 virgl_encoder_write_dword(ctx->cbuf, 0);
1390 virgl_encoder_write_dword(ctx->cbuf, 0);
1391 virgl_encoder_write_dword(ctx->cbuf, 0);
1392 }
1393 }
1394 return 0;
1395 }
1396
virgl_encode_memory_barrier(struct virgl_context * ctx,unsigned flags)1397 int virgl_encode_memory_barrier(struct virgl_context *ctx,
1398 unsigned flags)
1399 {
1400 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
1401 virgl_encoder_write_dword(ctx->cbuf, flags);
1402 return 0;
1403 }
1404
virgl_encode_launch_grid(struct virgl_context * ctx,const struct pipe_grid_info * grid_info)1405 int virgl_encode_launch_grid(struct virgl_context *ctx,
1406 const struct pipe_grid_info *grid_info)
1407 {
1408 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1409 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1410 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1411 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1412 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1413 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1414 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1415 if (grid_info->indirect) {
1416 struct virgl_resource *res = virgl_resource(grid_info->indirect);
1417 virgl_encoder_write_res(ctx, res);
1418 } else
1419 virgl_encoder_write_dword(ctx->cbuf, 0);
1420 virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1421 return 0;
1422 }
1423
virgl_encode_texture_barrier(struct virgl_context * ctx,unsigned flags)1424 int virgl_encode_texture_barrier(struct virgl_context *ctx,
1425 unsigned flags)
1426 {
1427 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
1428 virgl_encoder_write_dword(ctx->cbuf, flags);
1429 return 0;
1430 }
1431
virgl_encode_host_debug_flagstring(struct virgl_context * ctx,const char * flagstring)1432 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1433 const char *flagstring)
1434 {
1435 unsigned long slen = strlen(flagstring) + 1;
1436 uint32_t sslen;
1437 uint32_t string_length;
1438
1439 if (!slen)
1440 return 0;
1441
1442 if (slen > 4 * 0xffff) {
1443 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1444 slen = 4 * 0xffff;
1445 }
1446
1447 sslen = (uint32_t )(slen + 3) / 4;
1448 string_length = (uint32_t)MIN2(sslen * 4, slen);
1449
1450 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1451 virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1452 return 0;
1453 }
1454
virgl_encode_tweak(struct virgl_context * ctx,enum vrend_tweak_type tweak,uint32_t value)1455 int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
1456 {
1457 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
1458 virgl_encoder_write_dword(ctx->cbuf, tweak);
1459 virgl_encoder_write_dword(ctx->cbuf, value);
1460 return 0;
1461 }
1462
1463
/* Ask the host to write a query result into a query buffer object at
 * the given offset/index; 'wait' requests a blocking readback.
 */
int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, boolean wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   struct virgl_cmd_buf *out = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
   virgl_encoder_write_dword(out, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(out, wait ? 1 : 0);
   virgl_encoder_write_dword(out, result_type);
   virgl_encoder_write_dword(out, offset);
   virgl_encoder_write_dword(out, index);
   return 0;
}
1480
virgl_encode_transfer(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * trans,uint32_t direction)1481 void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
1482 struct virgl_transfer *trans, uint32_t direction)
1483 {
1484 uint32_t command;
1485 struct virgl_resource *vres = virgl_resource(trans->base.resource);
1486 enum virgl_transfer3d_encode_stride stride_type =
1487 virgl_transfer3d_host_inferred_stride;
1488
1489 if (trans->base.box.depth == 1 && trans->base.level == 0 &&
1490 trans->base.resource->target == PIPE_TEXTURE_2D &&
1491 vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
1492 stride_type = virgl_transfer3d_explicit_stride;
1493
1494 command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
1495 virgl_encoder_write_dword(buf, command);
1496 virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
1497 virgl_encoder_write_dword(buf, trans->offset);
1498 virgl_encoder_write_dword(buf, direction);
1499 }
1500
virgl_encode_copy_transfer(struct virgl_context * ctx,struct virgl_transfer * trans)1501 void virgl_encode_copy_transfer(struct virgl_context *ctx,
1502 struct virgl_transfer *trans)
1503 {
1504 uint32_t command;
1505 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
1506
1507 assert(trans->copy_src_hw_res);
1508
1509 command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
1510 virgl_encoder_write_cmd_dword(ctx, command);
1511 /* Copy transfers need to explicitly specify the stride, since it may differ
1512 * from the image stride.
1513 */
1514 virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
1515 vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
1516 virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
1517 /* At the moment all copy transfers are synchronized. */
1518 virgl_encoder_write_dword(ctx->cbuf, 1);
1519 }
1520
virgl_encode_end_transfers(struct virgl_cmd_buf * buf)1521 void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
1522 {
1523 uint32_t command, diff;
1524 diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
1525 if (diff) {
1526 command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
1527 virgl_encoder_write_dword(buf, command);
1528 }
1529 }
1530
virgl_encode_get_memory_info(struct virgl_context * ctx,struct virgl_resource * res)1531 void virgl_encode_get_memory_info(struct virgl_context *ctx, struct virgl_resource *res)
1532 {
1533 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_MEMORY_INFO, 0, 1));
1534 virgl_encoder_write_res(ctx, res);
1535 }
1536
virgl_encode_emit_string_marker(struct virgl_context * ctx,const char * message,int len)1537 void virgl_encode_emit_string_marker(struct virgl_context *ctx,
1538 const char *message, int len)
1539 {
1540 /* len is guaranteed to be non-negative but be defensive */
1541 assert(len >= 0);
1542 if (len <= 0)
1543 return;
1544
1545 if (len > 4 * 0xffff) {
1546 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1547 len = 4 * 0xffff;
1548 }
1549
1550 uint32_t buf_len = (uint32_t )(len + 3) / 4 + 1;
1551 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_EMIT_STRING_MARKER, 0, buf_len));
1552 virgl_encoder_write_dword(ctx->cbuf, len);
1553 virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)message, len);
1554 }
1555