1 /**************************************************************************
2 *
3 * Copyright (C) 2014 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 **************************************************************************/
24 #include <stdint.h>
25 #include <string.h>
26 #include <stdio.h>
27 #include <errno.h>
28 #include <epoxy/gl.h>
29 #include <fcntl.h>
30
31 #include "util/u_memory.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_state.h"
34 #include "pipe/p_shader_tokens.h"
35 #include "virgl_context.h"
36 #include "virgl_resource.h"
37 #include "vrend_renderer.h"
38 #include "vrend_object.h"
39 #include "tgsi/tgsi_text.h"
40 #include "vrend_debug.h"
41 #include "vrend_tweaks.h"
42 #include "virgl_util.h"
43
44 /* decode side */
45 #define DECODE_MAX_TOKENS 8000
46
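/*
 * Per-context decoder state: 'base' is the virgl_context handle the rest of
 * virglrenderer sees, while 'grctx' is the vrend renderer context that the
 * decoded commands are replayed into.
 */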
47 struct vrend_decode_ctx {
48 struct virgl_context base;
49 struct vrend_context *grctx;
50 };
51
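/*
 * Command streams are sequences of 32-bit dwords; these helpers read one
 * dword, or return a pointer into the stream, at a given dword offset (the
 * VIRGL_* macros used below are such offsets).
 */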
52 static inline uint32_t get_buf_entry(const uint32_t *buf, uint32_t offset)
53 {
54 return buf[offset];
55 }
56
57 static inline const void *get_buf_ptr(const uint32_t *buf, uint32_t offset)
58 {
59 return &buf[offset];
60 }
61
62 static int vrend_decode_create_shader(struct vrend_context *ctx,
63 const uint32_t *buf,
64 uint32_t handle,
65 uint16_t length)
66 {
67 struct pipe_stream_output_info so_info;
68 uint i;
69 int ret;
70 uint32_t shader_offset, req_local_mem = 0;
71 unsigned num_tokens, num_so_outputs, offlen;
72 const uint8_t *shd_text;
73 uint32_t type;
74
75 if (length < VIRGL_OBJ_SHADER_HDR_SIZE(0))
76 return EINVAL;
77
78 type = get_buf_entry(buf, VIRGL_OBJ_SHADER_TYPE);
79 num_tokens = get_buf_entry(buf, VIRGL_OBJ_SHADER_NUM_TOKENS);
80 offlen = get_buf_entry(buf, VIRGL_OBJ_SHADER_OFFSET);
81
82 if (type == PIPE_SHADER_COMPUTE) {
83 req_local_mem = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
84 num_so_outputs = 0;
85 } else {
86 num_so_outputs = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
87 if (length < VIRGL_OBJ_SHADER_HDR_SIZE(num_so_outputs))
88 return EINVAL;
89
90 if (num_so_outputs > PIPE_MAX_SO_OUTPUTS)
91 return EINVAL;
92 }
93
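   /*
    * The shader text starts right after the fixed header dwords; stream-output
    * info, when present, pushes it back by 4 stride dwords plus 2 dwords per
    * output (see the += below).
    */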
94 shader_offset = 6;
95 if (num_so_outputs) {
96 so_info.num_outputs = num_so_outputs;
97 if (so_info.num_outputs) {
98 for (i = 0; i < 4; i++)
99 so_info.stride[i] = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_STRIDE(i));
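         /*
          * Each stream-output descriptor spans two dwords; the layout implied
          * by the shifts/masks below is:
          *   dword 0: bits 0-7 register_index, 8-9 start_component,
          *            10-12 num_components, 13-15 output_buffer,
          *            16-31 dst_offset
          *   dword 1: bits 0-1 stream
          */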
100 for (i = 0; i < so_info.num_outputs; i++) {
101 uint32_t tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
102
103 so_info.output[i].register_index = tmp & 0xff;
104 so_info.output[i].start_component = (tmp >> 8) & 0x3;
105 so_info.output[i].num_components = (tmp >> 10) & 0x7;
106 so_info.output[i].output_buffer = (tmp >> 13) & 0x7;
107 so_info.output[i].dst_offset = (tmp >> 16) & 0xffff;
108 tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(i));
109 so_info.output[i].stream = (tmp & 0x3);
110 so_info.output[i].need_temp = so_info.output[i].num_components < 4;
111 }
112
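         /*
          * Outputs that capture a register already captured by an earlier
          * output are also flagged need_temp, like the partially-written
          * outputs above.
          */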
113 for (i = 0; i < so_info.num_outputs - 1; i++) {
114 for (unsigned j = i + 1; j < so_info.num_outputs; j++) {
115 so_info.output[j].need_temp |=
116 (so_info.output[i].register_index == so_info.output[j].register_index);
117 }
118 }
119 }
120 shader_offset += 4 + (2 * num_so_outputs);
121 } else
122 memset(&so_info, 0, sizeof(so_info));
123
124 shd_text = get_buf_ptr(buf, shader_offset);
125 ret = vrend_create_shader(ctx, handle, &so_info, req_local_mem, (const char *)shd_text, offlen, num_tokens, type, length - shader_offset + 1);
126
127 return ret;
128 }
129
130 static int vrend_decode_create_stream_output_target(struct vrend_context *ctx, const uint32_t *buf,
131 uint32_t handle, uint16_t length)
132 {
133 uint32_t res_handle, buffer_size, buffer_offset;
134
135 if (length != VIRGL_OBJ_STREAMOUT_SIZE)
136 return EINVAL;
137
138 res_handle = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_RES_HANDLE);
139 buffer_offset = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET);
140 buffer_size = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_SIZE);
141
142 return vrend_create_so_target(ctx, handle, res_handle, buffer_offset,
143 buffer_size);
144 }
145
146 static int vrend_decode_set_framebuffer_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
147 {
148 if (length < 2)
149 return EINVAL;
150
151 uint32_t nr_cbufs = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS);
152 uint32_t zsurf_handle = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE);
153 uint32_t surf_handle[8];
154 uint32_t i;
155
156 if (length != (2u + nr_cbufs))
157 return EINVAL;
158
159 if (nr_cbufs > 8)
160 return EINVAL;
161
162 for (i = 0; i < nr_cbufs; i++)
163 surf_handle[i] = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(i));
164 vrend_set_framebuffer_state(ctx, nr_cbufs, surf_handle, zsurf_handle);
165 return 0;
166 }
167
168 static int vrend_decode_set_framebuffer_state_no_attach(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
169 {
170 uint32_t width, height;
171 uint32_t layers, samples;
172 uint32_t tmp;
173
174 if (length != VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE)
175 return EINVAL;
176
177 tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT);
178 width = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH(tmp);
179 height = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_HEIGHT(tmp);
180
181 tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES);
182 layers = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS(tmp);
183 samples = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SAMPLES(tmp);
184
185 vrend_set_framebuffer_state_no_attach(ctx, width, height, layers, samples);
186 return 0;
187 }
188
189 static int vrend_decode_clear(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
190 {
191 union pipe_color_union color;
192 double depth;
193 unsigned stencil, buffers;
194 int i;
195
196 if (length != VIRGL_OBJ_CLEAR_SIZE)
197 return EINVAL;
198 buffers = get_buf_entry(buf, VIRGL_OBJ_CLEAR_BUFFERS);
199 for (i = 0; i < 4; i++)
200 color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_CLEAR_COLOR_0 + i);
201 const void *depth_ptr = get_buf_ptr(buf, VIRGL_OBJ_CLEAR_DEPTH_0);
202 memcpy(&depth, depth_ptr, sizeof(double));
203 stencil = get_buf_entry(buf, VIRGL_OBJ_CLEAR_STENCIL);
204
205 vrend_clear(ctx, buffers, &color, depth, stencil);
206 return 0;
207 }
208
209 static int vrend_decode_clear_texture(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
210 {
211 struct pipe_box box;
212 uint32_t handle;
213 uint32_t level;
214 uint32_t arr[4] = {0};
215
216 if (length != VIRGL_CLEAR_TEXTURE_SIZE)
217 return EINVAL;
218
219 handle = get_buf_entry(buf, VIRGL_TEXTURE_HANDLE);
220 level = get_buf_entry(buf, VIRGL_TEXTURE_LEVEL);
221 box.x = get_buf_entry(buf, VIRGL_TEXTURE_SRC_X);
222 box.y = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Y);
223 box.z = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Z);
224 box.width = get_buf_entry(buf, VIRGL_TEXTURE_SRC_W);
225 box.height = get_buf_entry(buf, VIRGL_TEXTURE_SRC_H);
226 box.depth = get_buf_entry(buf, VIRGL_TEXTURE_SRC_D);
227 arr[0] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_A);
228 arr[1] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_B);
229 arr[2] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_C);
230 arr[3] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_D);
231
232 vrend_clear_texture(ctx, handle, level, &box, (void *) &arr);
233 return 0;
234 }
235
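/* Reinterpret a dword from the command stream as the IEEE-754 float it encodes. */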
236 static float uif(unsigned int ui)
237 {
238 union { float f; unsigned int ui; } myuif;
239 myuif.ui = ui;
240 return myuif.f;
241 }
242
243 static int vrend_decode_set_viewport_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
244 {
245 struct pipe_viewport_state vps[PIPE_MAX_VIEWPORTS];
246 uint i, v;
247 uint32_t num_viewports, start_slot;
248 if (length < 1)
249 return EINVAL;
250
251 if ((length - 1) % 6)
252 return EINVAL;
253
254 num_viewports = (length - 1) / 6;
255 start_slot = get_buf_entry(buf, VIRGL_SET_VIEWPORT_START_SLOT);
256
257 if (num_viewports > PIPE_MAX_VIEWPORTS ||
258 start_slot > (PIPE_MAX_VIEWPORTS - num_viewports))
259 return EINVAL;
260
261 for (v = 0; v < num_viewports; v++) {
262 for (i = 0; i < 3; i++)
263 vps[v].scale[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_SCALE_0(v) + i));
264 for (i = 0; i < 3; i++)
265 vps[v].translate[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(v) + i));
266 }
267
268 vrend_set_viewport_states(ctx, start_slot, num_viewports, vps);
269 return 0;
270 }
271
272 static int vrend_decode_set_index_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
273 {
274 if (length != 1 && length != 3)
275 return EINVAL;
276 vrend_set_index_buffer(ctx,
277 get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_HANDLE),
278 (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_INDEX_SIZE) : 0,
279 (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_OFFSET) : 0);
280 return 0;
281 }
282
283 static int vrend_decode_set_constant_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
284 {
285 uint32_t shader;
286 int nc = (length - 2);
287
288 if (length < 2)
289 return EINVAL;
290
291 shader = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
292 /* VIRGL_SET_CONSTANT_BUFFER_INDEX is not used */
293
294 if (shader >= PIPE_SHADER_TYPES)
295 return EINVAL;
296
297 vrend_set_constants(ctx, shader, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
298 return 0;
299 }
300
301 static int vrend_decode_set_uniform_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
302 {
303 if (length != VIRGL_SET_UNIFORM_BUFFER_SIZE)
304 return EINVAL;
305
306 uint32_t shader = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
307 uint32_t index = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_INDEX);
308 uint32_t offset = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
309 uint32_t blength = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
310 uint32_t handle = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
311
312 if (shader >= PIPE_SHADER_TYPES)
313 return EINVAL;
314
315 if (index >= PIPE_MAX_CONSTANT_BUFFERS)
316 return EINVAL;
317
318 vrend_set_uniform_buffer(ctx, shader, index, offset, blength, handle);
319 return 0;
320 }
321
322 static int vrend_decode_set_vertex_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
323 {
324 int num_vbo;
325 int i;
326
327 /* must be a multiple of 3 */
328 if (length && (length % 3))
329 return EINVAL;
330
331 num_vbo = (length / 3);
332 if (num_vbo > PIPE_MAX_ATTRIBS)
333 return EINVAL;
334
335 for (i = 0; i < num_vbo; i++) {
336 vrend_set_single_vbo(ctx, i,
337 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_STRIDE(i)),
338 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_OFFSET(i)),
339 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_HANDLE(i)));
340 }
341 vrend_set_num_vbo(ctx, num_vbo);
342 return 0;
343 }
344
345 static int vrend_decode_set_sampler_views(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
346 {
347 uint32_t num_samps;
348 uint32_t i;
349 uint32_t shader_type;
350 uint32_t start_slot;
351
352 if (length < 2)
353 return EINVAL;
354 num_samps = length - 2;
355 shader_type = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE);
356 start_slot = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_START_SLOT);
357
358 if (shader_type >= PIPE_SHADER_TYPES)
359 return EINVAL;
360
361 if (num_samps > PIPE_MAX_SHADER_SAMPLER_VIEWS ||
362 start_slot > (PIPE_MAX_SHADER_SAMPLER_VIEWS - num_samps))
363 return EINVAL;
364
365 for (i = 0; i < num_samps; i++) {
366 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE + i);
367 vrend_set_single_sampler_view(ctx, shader_type, i + start_slot, handle);
368 }
369 vrend_set_num_sampler_views(ctx, shader_type, start_slot, num_samps);
370 return 0;
371 }
372
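/*
 * Decode the transfer header shared by RESOURCE_INLINE_WRITE, TRANSFER3D and
 * COPY_TRANSFER3D: destination resource handle, mip level, strides and the
 * 3D box.
 */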
373 static void vrend_decode_transfer_common(const uint32_t *buf,
374 uint32_t *dst_handle,
375 struct vrend_transfer_info *info)
376 {
377 *dst_handle = get_buf_entry(buf, VIRGL_RESOURCE_IW_RES_HANDLE);
378
379 info->level = get_buf_entry(buf, VIRGL_RESOURCE_IW_LEVEL);
380 info->stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_STRIDE);
381 info->layer_stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_LAYER_STRIDE);
382 info->box->x = get_buf_entry(buf, VIRGL_RESOURCE_IW_X);
383 info->box->y = get_buf_entry(buf, VIRGL_RESOURCE_IW_Y);
384 info->box->z = get_buf_entry(buf, VIRGL_RESOURCE_IW_Z);
385 info->box->width = get_buf_entry(buf, VIRGL_RESOURCE_IW_W);
386 info->box->height = get_buf_entry(buf, VIRGL_RESOURCE_IW_H);
387 info->box->depth = get_buf_entry(buf, VIRGL_RESOURCE_IW_D);
388 }
389
390 static int vrend_decode_resource_inline_write(struct vrend_context *ctx, const uint32_t *buf,
391 uint32_t length)
392 {
393 struct pipe_box box;
394 uint32_t dst_handle;
395 struct vrend_transfer_info info;
396 uint32_t data_len;
397 struct iovec dataiovec;
398 const void *data;
399
400 if (length < 12)
401 return EINVAL;
402
403 memset(&info, 0, sizeof(info));
404 info.box = &box;
405 vrend_decode_transfer_common(buf, &dst_handle, &info);
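   /* The inline payload follows the 11-dword transfer header; lengths are in
    * dwords, so convert to bytes for the iovec below. */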
406 data_len = (length - 11) * 4;
407 data = get_buf_ptr(buf, VIRGL_RESOURCE_IW_DATA_START);
408
409 info.offset = 0;
410
411 dataiovec.iov_base = (void*)data;
412 dataiovec.iov_len = data_len;
413
414 info.iovec = &dataiovec;
415 info.iovec_cnt = 1;
416 return vrend_transfer_inline_write(ctx, dst_handle, &info);
417 }
418
419 static int vrend_decode_draw_vbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
420 {
421 struct pipe_draw_info info;
422 uint32_t cso;
423 uint32_t handle = 0, indirect_draw_count_handle = 0;
424 if (length != VIRGL_DRAW_VBO_SIZE && length != VIRGL_DRAW_VBO_SIZE_TESS &&
425 length != VIRGL_DRAW_VBO_SIZE_INDIRECT)
426 return EINVAL;
427 memset(&info, 0, sizeof(struct pipe_draw_info));
428
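   /*
    * The basic draw parameters are always present; the longer _TESS and
    * _INDIRECT command sizes append vertices_per_patch/drawid and the
    * indirect-draw parameters respectively.
    */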
429 info.start = get_buf_entry(buf, VIRGL_DRAW_VBO_START);
430 info.count = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT);
431 info.mode = get_buf_entry(buf, VIRGL_DRAW_VBO_MODE);
432 info.indexed = !!get_buf_entry(buf, VIRGL_DRAW_VBO_INDEXED);
433 info.instance_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INSTANCE_COUNT);
434 info.index_bias = get_buf_entry(buf, VIRGL_DRAW_VBO_INDEX_BIAS);
435 info.start_instance = get_buf_entry(buf, VIRGL_DRAW_VBO_START_INSTANCE);
436 info.primitive_restart = !!get_buf_entry(buf, VIRGL_DRAW_VBO_PRIMITIVE_RESTART);
437 info.restart_index = get_buf_entry(buf, VIRGL_DRAW_VBO_RESTART_INDEX);
438 info.min_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MIN_INDEX);
439 info.max_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MAX_INDEX);
440
441 if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
442 info.vertices_per_patch = get_buf_entry(buf, VIRGL_DRAW_VBO_VERTICES_PER_PATCH);
443 info.drawid = get_buf_entry(buf, VIRGL_DRAW_VBO_DRAWID);
444 }
445
446 if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
447 handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_HANDLE);
448 info.indirect.offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_OFFSET);
449 info.indirect.stride = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_STRIDE);
450 info.indirect.draw_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT);
451 info.indirect.indirect_draw_count_offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET);
452 indirect_draw_count_handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE);
453 }
454
455 cso = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT_FROM_SO);
456
457 return vrend_draw_vbo(ctx, &info, cso, handle, indirect_draw_count_handle);
458 }
459
460 static int vrend_decode_create_blend(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
461 {
462 struct pipe_blend_state *blend_state;
463 uint32_t tmp;
464 int i;
465
466 if (length != VIRGL_OBJ_BLEND_SIZE) {
467 return EINVAL;
468 }
469
470 blend_state = CALLOC_STRUCT(pipe_blend_state);
471 if (!blend_state)
472 return ENOMEM;
473
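   /*
    * S0 packs the global blend flags one bit each, S1 carries the logicop
    * function, and each S2(i) dword packs the per-render-target enable,
    * funcs, factors and colormask, as unpacked below.
    */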
474 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S0);
475 blend_state->independent_blend_enable = (tmp & 1);
476 blend_state->logicop_enable = (tmp >> 1) & 0x1;
477 blend_state->dither = (tmp >> 2) & 0x1;
478 blend_state->alpha_to_coverage = (tmp >> 3) & 0x1;
479 blend_state->alpha_to_one = (tmp >> 4) & 0x1;
480
481 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S1);
482 blend_state->logicop_func = tmp & 0xf;
483
484 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
485 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S2(i));
486 blend_state->rt[i].blend_enable = tmp & 0x1;
487 blend_state->rt[i].rgb_func = (tmp >> 1) & 0x7;
488 blend_state->rt[i].rgb_src_factor = (tmp >> 4) & 0x1f;
489 blend_state->rt[i].rgb_dst_factor = (tmp >> 9) & 0x1f;
490 blend_state->rt[i].alpha_func = (tmp >> 14) & 0x7;
491 blend_state->rt[i].alpha_src_factor = (tmp >> 17) & 0x1f;
492 blend_state->rt[i].alpha_dst_factor = (tmp >> 22) & 0x1f;
493 blend_state->rt[i].colormask = (tmp >> 27) & 0xf;
494 }
495
496 tmp = vrend_renderer_object_insert(ctx, blend_state, handle,
497 VIRGL_OBJECT_BLEND);
498 if (tmp == 0) {
499 FREE(blend_state);
500 return ENOMEM;
501 }
502 return 0;
503 }
504
505 static int vrend_decode_create_dsa(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
506 {
507 int i;
508 struct pipe_depth_stencil_alpha_state *dsa_state;
509 uint32_t tmp;
510
511 if (length != VIRGL_OBJ_DSA_SIZE)
512 return EINVAL;
513
514 dsa_state = CALLOC_STRUCT(pipe_depth_stencil_alpha_state);
515 if (!dsa_state)
516 return ENOMEM;
517
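   /*
    * S0 packs the depth and alpha-test state; the two S1 dwords carry the
    * front and back stencil state, unpacked field by field below.
    */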
518 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S0);
519 dsa_state->depth.enabled = tmp & 0x1;
520 dsa_state->depth.writemask = (tmp >> 1) & 0x1;
521 dsa_state->depth.func = (tmp >> 2) & 0x7;
522
523 dsa_state->alpha.enabled = (tmp >> 8) & 0x1;
524 dsa_state->alpha.func = (tmp >> 9) & 0x7;
525
526 for (i = 0; i < 2; i++) {
527 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S1 + i);
528 dsa_state->stencil[i].enabled = tmp & 0x1;
529 dsa_state->stencil[i].func = (tmp >> 1) & 0x7;
530 dsa_state->stencil[i].fail_op = (tmp >> 4) & 0x7;
531 dsa_state->stencil[i].zpass_op = (tmp >> 7) & 0x7;
532 dsa_state->stencil[i].zfail_op = (tmp >> 10) & 0x7;
533 dsa_state->stencil[i].valuemask = (tmp >> 13) & 0xff;
534 dsa_state->stencil[i].writemask = (tmp >> 21) & 0xff;
535 }
536
537 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_ALPHA_REF);
538 dsa_state->alpha.ref_value = uif(tmp);
539
540 tmp = vrend_renderer_object_insert(ctx, dsa_state, handle,
541 VIRGL_OBJECT_DSA);
542 if (tmp == 0) {
543 FREE(dsa_state);
544 return ENOMEM;
545 }
546 return 0;
547 }
548
549 static int vrend_decode_create_rasterizer(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
550 {
551 struct pipe_rasterizer_state *rs_state;
552 uint32_t tmp;
553
554 if (length != VIRGL_OBJ_RS_SIZE)
555 return EINVAL;
556
557 rs_state = CALLOC_STRUCT(pipe_rasterizer_state);
558 if (!rs_state)
559 return ENOMEM;
560
561 tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S0);
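   /* Helpers for pulling single-bit flags (ebit) and small multi-bit fields
    * (emask) out of the packed S0/S3 dwords. */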
562 #define ebit(name, bit) rs_state->name = (tmp >> bit) & 0x1
563 #define emask(name, bit, mask) rs_state->name = (tmp >> bit) & mask
564
565 ebit(flatshade, 0);
566 ebit(depth_clip, 1);
567 ebit(clip_halfz, 2);
568 ebit(rasterizer_discard, 3);
569 ebit(flatshade_first, 4);
570 ebit(light_twoside, 5);
571 ebit(sprite_coord_mode, 6);
572 ebit(point_quad_rasterization, 7);
573 emask(cull_face, 8, 0x3);
574 emask(fill_front, 10, 0x3);
575 emask(fill_back, 12, 0x3);
576 ebit(scissor, 14);
577 ebit(front_ccw, 15);
578 ebit(clamp_vertex_color, 16);
579 ebit(clamp_fragment_color, 17);
580 ebit(offset_line, 18);
581 ebit(offset_point, 19);
582 ebit(offset_tri, 20);
583 ebit(poly_smooth, 21);
584 ebit(poly_stipple_enable, 22);
585 ebit(point_smooth, 23);
586 ebit(point_size_per_vertex, 24);
587 ebit(multisample, 25);
588 ebit(line_smooth, 26);
589 ebit(line_stipple_enable, 27);
590 ebit(line_last_pixel, 28);
591 ebit(half_pixel_center, 29);
592 ebit(bottom_edge_rule, 30);
593 ebit(force_persample_interp, 31);
594 rs_state->point_size = uif(get_buf_entry(buf, VIRGL_OBJ_RS_POINT_SIZE));
595 rs_state->sprite_coord_enable = get_buf_entry(buf, VIRGL_OBJ_RS_SPRITE_COORD_ENABLE);
596 tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S3);
597 emask(line_stipple_pattern, 0, 0xffff);
598 emask(line_stipple_factor, 16, 0xff);
599 emask(clip_plane_enable, 24, 0xff);
600
601 rs_state->line_width = uif(get_buf_entry(buf, VIRGL_OBJ_RS_LINE_WIDTH));
602 rs_state->offset_units = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_UNITS));
603 rs_state->offset_scale = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_SCALE));
604 rs_state->offset_clamp = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_CLAMP));
605
606 tmp = vrend_renderer_object_insert(ctx, rs_state, handle,
607 VIRGL_OBJECT_RASTERIZER);
608 if (tmp == 0) {
609 FREE(rs_state);
610 return ENOMEM;
611 }
612 return 0;
613 }
614
615 static int vrend_decode_create_surface(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
616 {
617 uint32_t res_handle, format, val0, val1;
618 int ret;
619
620 if (length != VIRGL_OBJ_SURFACE_SIZE)
621 return EINVAL;
622
623 res_handle = get_buf_entry(buf, VIRGL_OBJ_SURFACE_RES_HANDLE);
624 format = get_buf_entry(buf, VIRGL_OBJ_SURFACE_FORMAT);
625 /* decide later if these are texture or buffer */
626 val0 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT);
627 val1 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT);
628 ret = vrend_create_surface(ctx, handle, res_handle, format, val0, val1);
629 return ret;
630 }
631
632 static int vrend_decode_create_sampler_view(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
633 {
634 uint32_t res_handle, format, val0, val1, swizzle_packed;
635
636 if (length != VIRGL_OBJ_SAMPLER_VIEW_SIZE)
637 return EINVAL;
638
639 res_handle = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE);
640 format = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_FORMAT);
641 val0 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT);
642 val1 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT);
643 swizzle_packed = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE);
644 return vrend_create_sampler_view(ctx, handle, res_handle, format, val0, val1,swizzle_packed);
645 }
646
647 static int vrend_decode_create_sampler_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
648 {
649 struct pipe_sampler_state state;
650 int i;
651 uint32_t tmp;
652
653 if (length != VIRGL_OBJ_SAMPLER_STATE_SIZE)
654 return EINVAL;
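   /* S0 packs the wrap modes, min/mag/mip filters, compare mode/func and the
    * seamless-cube-map flag into a single dword. */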
655 tmp = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_S0);
656 state.wrap_s = tmp & 0x7;
657 state.wrap_t = (tmp >> 3) & 0x7;
658 state.wrap_r = (tmp >> 6) & 0x7;
659 state.min_img_filter = (tmp >> 9) & 0x3;
660 state.min_mip_filter = (tmp >> 11) & 0x3;
661 state.mag_img_filter = (tmp >> 13) & 0x3;
662 state.compare_mode = (tmp >> 15) & 0x1;
663 state.compare_func = (tmp >> 16) & 0x7;
664 state.seamless_cube_map = (tmp >> 19) & 0x1;
665
666 state.lod_bias = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS));
667 state.min_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MIN_LOD));
668 state.max_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MAX_LOD));
669
670 for (i = 0; i < 4; i++)
671 state.border_color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(i));
672
673 if (state.min_mip_filter != PIPE_TEX_MIPFILTER_NONE &&
674 state.min_mip_filter != PIPE_TEX_MIPFILTER_LINEAR &&
675 state.min_mip_filter != PIPE_TEX_MIPFILTER_NEAREST)
676 return EINVAL;
677
678 return vrend_create_sampler_state(ctx, handle, &state);
679 }
680
681 static int vrend_decode_create_ve(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
682 {
683 struct pipe_vertex_element *ve = NULL;
684 int num_elements;
685 int i;
686 int ret;
687
688 if (length < 1)
689 return EINVAL;
690
691 if ((length - 1) % 4)
692 return EINVAL;
693
694 num_elements = (length - 1) / 4;
695
696 if (num_elements) {
697 ve = calloc(num_elements, sizeof(struct pipe_vertex_element));
698
699 if (!ve)
700 return ENOMEM;
701
702 for (i = 0; i < num_elements; i++) {
703 ve[i].src_offset = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(i));
704 ve[i].instance_divisor = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(i));
705 ve[i].vertex_buffer_index = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(i));
706
707 if (ve[i].vertex_buffer_index >= PIPE_MAX_ATTRIBS) {
708 FREE(ve);
709 return EINVAL;
710 }
711
712 ve[i].src_format = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(i));
713 }
714 }
715
716 ret = vrend_create_vertex_elements_state(ctx, handle, num_elements, ve);
717
718 FREE(ve);
719 return ret;
720 }
721
722 static int vrend_decode_create_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
723 {
724 uint32_t query_type;
725 uint32_t query_index;
726 uint32_t res_handle;
727 uint32_t offset;
728 uint32_t tmp;
729
730 if (length != VIRGL_OBJ_QUERY_SIZE)
731 return EINVAL;
732
733 tmp = get_buf_entry(buf, VIRGL_OBJ_QUERY_TYPE_INDEX);
734 query_type = VIRGL_OBJ_QUERY_TYPE(tmp);
735 query_index = (tmp >> 16) & 0xffff;
736
737 offset = get_buf_entry(buf, VIRGL_OBJ_QUERY_OFFSET);
738 res_handle = get_buf_entry(buf, VIRGL_OBJ_QUERY_RES_HANDLE);
739
740 return vrend_create_query(ctx, handle, query_type, query_index, res_handle, offset);
741 }
742
743 static int vrend_decode_create_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
744 {
745 if (length < 1)
746 return EINVAL;
747
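   /* The create header carries the object type in bits 8-15; each per-type
    * helper validates its own payload length. */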
748 uint32_t header = get_buf_entry(buf, VIRGL_OBJ_CREATE_HEADER);
749 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_CREATE_HANDLE);
750 uint8_t obj_type = (header >> 8) & 0xff;
751 int ret = 0;
752
753 if (handle == 0)
754 return EINVAL;
755
756 VREND_DEBUG(dbg_object, ctx," CREATE %-18s handle:0x%x len:%d\n",
757 vrend_get_object_type_name(obj_type), handle, length);
758
759 TRACE_SCOPE(vrend_get_object_type_name(obj_type));
760
761 switch (obj_type){
762 case VIRGL_OBJECT_BLEND:
763 ret = vrend_decode_create_blend(ctx, buf, handle, length);
764 break;
765 case VIRGL_OBJECT_DSA:
766 ret = vrend_decode_create_dsa(ctx, buf, handle, length);
767 break;
768 case VIRGL_OBJECT_RASTERIZER:
769 ret = vrend_decode_create_rasterizer(ctx, buf, handle, length);
770 break;
771 case VIRGL_OBJECT_SHADER:
772 ret = vrend_decode_create_shader(ctx, buf, handle, length);
773 break;
774 case VIRGL_OBJECT_VERTEX_ELEMENTS:
775 ret = vrend_decode_create_ve(ctx, buf, handle, length);
776 break;
777 case VIRGL_OBJECT_SURFACE:
778 ret = vrend_decode_create_surface(ctx, buf, handle, length);
779 break;
780 case VIRGL_OBJECT_SAMPLER_VIEW:
781 ret = vrend_decode_create_sampler_view(ctx, buf, handle, length);
782 break;
783 case VIRGL_OBJECT_SAMPLER_STATE:
784 ret = vrend_decode_create_sampler_state(ctx, buf, handle, length);
785 break;
786 case VIRGL_OBJECT_QUERY:
787 ret = vrend_decode_create_query(ctx, buf, handle, length);
788 break;
789 case VIRGL_OBJECT_STREAMOUT_TARGET:
790 ret = vrend_decode_create_stream_output_target(ctx, buf, handle, length);
791 break;
792 default:
793 return EINVAL;
794 }
795
796 return ret;
797 }
798
799 static int vrend_decode_bind_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
800 {
801 if (length != 1)
802 return EINVAL;
803
804 uint32_t header = get_buf_entry(buf, VIRGL_OBJ_BIND_HEADER);
805 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_BIND_HANDLE);
806 uint8_t obj_type = (header >> 8) & 0xff;
807
808 VREND_DEBUG(dbg_object, ctx,
809 " BIND %-20s handle:0x%x len:%d\n",
810 vrend_get_object_type_name(obj_type), handle, length);
811
812 switch (obj_type) {
813 case VIRGL_OBJECT_BLEND:
814 vrend_object_bind_blend(ctx, handle);
815 break;
816 case VIRGL_OBJECT_DSA:
817 vrend_object_bind_dsa(ctx, handle);
818 break;
819 case VIRGL_OBJECT_RASTERIZER:
820 vrend_object_bind_rasterizer(ctx, handle);
821 break;
822 case VIRGL_OBJECT_VERTEX_ELEMENTS:
823 vrend_bind_vertex_elements_state(ctx, handle);
824 break;
825 default:
826 return EINVAL;
827 }
828
829 return 0;
830 }
831
832 static int vrend_decode_destroy_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
833 {
834 if (length != 1)
835 return EINVAL;
836
837 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_DESTROY_HANDLE);
838
839 VREND_DEBUG_EXT(dbg_object, ctx,
840 uint32_t obj = (get_buf_entry(buf, 0) >> 8) & 0xFF;
841 vrend_printf(" DESTROY %-17s handle:0x%x\n",
842 vrend_get_object_type_name(obj), handle));
843
844 vrend_renderer_object_destroy(ctx, handle);
845 return 0;
846 }
847
848 static int vrend_decode_set_stencil_ref(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
849 {
850 if (length != VIRGL_SET_STENCIL_REF_SIZE)
851 return EINVAL;
852
853 struct pipe_stencil_ref ref;
854 uint32_t val = get_buf_entry(buf, VIRGL_SET_STENCIL_REF);
855
856 ref.ref_value[0] = val & 0xff;
857 ref.ref_value[1] = (val >> 8) & 0xff;
858 vrend_set_stencil_ref(ctx, &ref);
859 return 0;
860 }
861
862 static int vrend_decode_set_blend_color(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
863 {
864 struct pipe_blend_color color;
865 int i;
866
867 if (length != VIRGL_SET_BLEND_COLOR_SIZE)
868 return EINVAL;
869
870 for (i = 0; i < 4; i++)
871 color.color[i] = uif(get_buf_entry(buf, VIRGL_SET_BLEND_COLOR(i)));
872
873 vrend_set_blend_color(ctx, &color);
874 return 0;
875 }
876
877 static int vrend_decode_set_scissor_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
878 {
879 struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS];
880 uint32_t temp;
881 int32_t num_scissor;
882 uint32_t start_slot;
883 int s;
884 if (length < 1)
885 return EINVAL;
886
887 if ((length - 1) % 2)
888 return EINVAL;
889
890 num_scissor = (length - 1) / 2;
891 if (num_scissor > PIPE_MAX_VIEWPORTS)
892 return EINVAL;
893
894 start_slot = get_buf_entry(buf, VIRGL_SET_SCISSOR_START_SLOT);
895
896 for (s = 0; s < num_scissor; s++) {
897 temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MINX_MINY(s));
898 ss[s].minx = temp & 0xffff;
899 ss[s].miny = (temp >> 16) & 0xffff;
900
901 temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MAXX_MAXY(s));
902 ss[s].maxx = temp & 0xffff;
903 ss[s].maxy = (temp >> 16) & 0xffff;
904 }
905
906 vrend_set_scissor_state(ctx, start_slot, num_scissor, ss);
907 return 0;
908 }
909
910 static int vrend_decode_set_polygon_stipple(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
911 {
912 struct pipe_poly_stipple ps;
913 int i;
914
915 if (length != VIRGL_POLYGON_STIPPLE_SIZE)
916 return EINVAL;
917
918 for (i = 0; i < 32; i++)
919 ps.stipple[i] = get_buf_entry(buf, VIRGL_POLYGON_STIPPLE_P0 + i);
920
921 vrend_set_polygon_stipple(ctx, &ps);
922 return 0;
923 }
924
925 static int vrend_decode_set_clip_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
926 {
927 struct pipe_clip_state clip;
928 int i, j;
929
930 if (length != VIRGL_SET_CLIP_STATE_SIZE)
931 return EINVAL;
932
933 for (i = 0; i < 8; i++)
934 for (j = 0; j < 4; j++)
935 clip.ucp[i][j] = uif(get_buf_entry(buf, VIRGL_SET_CLIP_STATE_C0 + (i * 4) + j));
936 vrend_set_clip_state(ctx, &clip);
937 return 0;
938 }
939
940 static int vrend_decode_set_sample_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
941 {
942 unsigned mask;
943
944 if (length != VIRGL_SET_SAMPLE_MASK_SIZE)
945 return EINVAL;
946 mask = get_buf_entry(buf, VIRGL_SET_SAMPLE_MASK_MASK);
947 vrend_set_sample_mask(ctx, mask);
948 return 0;
949 }
950
951 static int vrend_decode_set_min_samples(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
952 {
953 unsigned min_samples;
954
955 if (length != VIRGL_SET_MIN_SAMPLES_SIZE)
956 return EINVAL;
957 min_samples = get_buf_entry(buf, VIRGL_SET_MIN_SAMPLES_MASK);
958 vrend_set_min_samples(ctx, min_samples);
959 return 0;
960 }
961
962 static int vrend_decode_resource_copy_region(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
963 {
964 struct pipe_box box;
965 uint32_t dst_handle, src_handle;
966 uint32_t dst_level, dstx, dsty, dstz;
967 uint32_t src_level;
968
969 if (length != VIRGL_CMD_RESOURCE_COPY_REGION_SIZE)
970 return EINVAL;
971
972 dst_handle = get_buf_entry(buf, VIRGL_CMD_RCR_DST_RES_HANDLE);
973 dst_level = get_buf_entry(buf, VIRGL_CMD_RCR_DST_LEVEL);
974 dstx = get_buf_entry(buf, VIRGL_CMD_RCR_DST_X);
975 dsty = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Y);
976 dstz = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Z);
977 src_handle = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_RES_HANDLE);
978 src_level = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_LEVEL);
979 box.x = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_X);
980 box.y = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Y);
981 box.z = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Z);
982 box.width = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_W);
983 box.height = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_H);
984 box.depth = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_D);
985
986 vrend_renderer_resource_copy_region(ctx, dst_handle,
987 dst_level, dstx, dsty, dstz,
988 src_handle, src_level,
989 &box);
990 return 0;
991 }
992
993
994 static int vrend_decode_blit(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
995 {
996 struct pipe_blit_info info;
997 uint32_t dst_handle, src_handle, temp;
998
999 if (length != VIRGL_CMD_BLIT_SIZE)
1000 return EINVAL;
1001 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_S0);
1002 info.mask = temp & 0xff;
1003 info.filter = (temp >> 8) & 0x3;
1004 info.scissor_enable = (temp >> 10) & 0x1;
1005 info.render_condition_enable = (temp >> 11) & 0x1;
1006 info.alpha_blend = (temp >> 12) & 0x1;
1007 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MINX_MINY);
1008 info.scissor.minx = temp & 0xffff;
1009 info.scissor.miny = (temp >> 16) & 0xffff;
1010 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY);
1011 info.scissor.maxx = temp & 0xffff;
1012 info.scissor.maxy = (temp >> 16) & 0xffff;
1013 dst_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_RES_HANDLE);
1014 info.dst.level = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_LEVEL);
1015 info.dst.format = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_FORMAT);
1016 info.dst.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_X);
1017 info.dst.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Y);
1018 info.dst.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Z);
1019 info.dst.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_W);
1020 info.dst.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_H);
1021 info.dst.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_D);
1022
1023 src_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_RES_HANDLE);
1024 info.src.level = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_LEVEL);
1025 info.src.format = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_FORMAT);
1026 info.src.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_X);
1027 info.src.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Y);
1028 info.src.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Z);
1029 info.src.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_W);
1030 info.src.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_H);
1031 info.src.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_D);
1032
1033 vrend_renderer_blit(ctx, dst_handle, src_handle, &info);
1034 return 0;
1035 }
1036
1037 static int vrend_decode_bind_sampler_states(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1038 {
1039 if (length < 2)
1040 return EINVAL;
1041
1042 uint32_t shader_type = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE);
1043 uint32_t start_slot = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_START_SLOT);
1044 uint32_t num_states = length - 2;
1045
1046 if (shader_type >= PIPE_SHADER_TYPES)
1047 return EINVAL;
1048
1049 vrend_bind_sampler_states(ctx, shader_type, start_slot, num_states,
1050 get_buf_ptr(buf, VIRGL_BIND_SAMPLER_STATES_S0_HANDLE));
1051 return 0;
1052 }
1053
1054 static int vrend_decode_begin_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1055 {
1056 if (length != 1)
1057 return EINVAL;
1058
1059 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_BEGIN_HANDLE);
1060
1061 return vrend_begin_query(ctx, handle);
1062 }
1063
1064 static int vrend_decode_end_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1065 {
1066 if (length != 1)
1067 return EINVAL;
1068
1069 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_END_HANDLE);
1070
1071 return vrend_end_query(ctx, handle);
1072 }
1073
1074 static int vrend_decode_get_query_result(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1075 {
1076 if (length != 2)
1077 return EINVAL;
1078
1079 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_HANDLE);
1080 uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_WAIT);
1081
1082 vrend_get_query_result(ctx, handle, wait);
1083 return 0;
1084 }
1085
1086 static int vrend_decode_get_query_result_qbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1087 {
1088 if (length != VIRGL_QUERY_RESULT_QBO_SIZE)
1089 return EINVAL;
1090
1091 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_HANDLE);
1092 uint32_t qbo_handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_QBO_HANDLE);
1093 uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_WAIT);
1094 uint32_t result_type = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_RESULT_TYPE);
1095 uint32_t offset = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_OFFSET);
1096 int32_t index = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_INDEX);
1097
1098 vrend_get_query_result_qbo(ctx, handle, qbo_handle, wait, result_type, offset, index);
1099 return 0;
1100 }
1101
1102 static int vrend_decode_set_render_condition(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1103 {
1104 if (length != VIRGL_RENDER_CONDITION_SIZE)
1105 return EINVAL;
1106
1107 uint32_t handle = get_buf_entry(buf, VIRGL_RENDER_CONDITION_HANDLE);
1108 bool condition = get_buf_entry(buf, VIRGL_RENDER_CONDITION_CONDITION) & 1;
1109 uint mode = get_buf_entry(buf, VIRGL_RENDER_CONDITION_MODE);
1110
1111 vrend_render_condition(ctx, handle, condition, mode);
1112 return 0;
1113 }
1114
1115 static int vrend_decode_set_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1116 {
1117 if (length != 1)
1118 return EINVAL;
1119
1120 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1121
1122 vrend_renderer_set_sub_ctx(ctx, ctx_sub_id);
1123 return 0;
1124 }
1125
1126 static int vrend_decode_create_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1127 {
1128 if (length != 1)
1129 return EINVAL;
1130
1131 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1132
1133 vrend_renderer_create_sub_ctx(ctx, ctx_sub_id);
1134 return 0;
1135 }
1136
1137 static int vrend_decode_destroy_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1138 {
1139 if (length != 1)
1140 return EINVAL;
1141
1142 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1143
1144 vrend_renderer_destroy_sub_ctx(ctx, ctx_sub_id);
1145 return 0;
1146 }
1147
1148 static int vrend_decode_bind_shader(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1149 {
1150 uint32_t handle, type;
1151 if (length != VIRGL_BIND_SHADER_SIZE)
1152 return EINVAL;
1153
1154 handle = get_buf_entry(buf, VIRGL_BIND_SHADER_HANDLE);
1155 type = get_buf_entry(buf, VIRGL_BIND_SHADER_TYPE);
1156
1157 vrend_bind_shader(ctx, handle, type);
1158 return 0;
1159 }
1160
1161 static int vrend_decode_set_tess_state(struct vrend_context *ctx,
1162 const uint32_t *buf, uint32_t length)
1163 {
1164 float tess_factors[6];
1165 int i;
1166
1167 if (length != VIRGL_TESS_STATE_SIZE)
1168 return EINVAL;
1169
1170 for (i = 0; i < 6; i++) {
1171 tess_factors[i] = uif(get_buf_entry(buf, i + 1));
1172 }
1173 vrend_set_tess_state(ctx, tess_factors);
1174 return 0;
1175 }
1176
1177 static int vrend_decode_set_shader_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1178 {
1179 uint32_t num_ssbo;
1180 uint32_t shader_type, start_slot;
1181
1182 if (length < 2)
1183 return EINVAL;
1184
1185 num_ssbo = (length - 2) / VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE;
1186 shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_SHADER_TYPE);
1187 start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_START_SLOT);
1188 if (shader_type >= PIPE_SHADER_TYPES)
1189 return EINVAL;
1190
1191 if (num_ssbo < 1)
1192 return 0;
1193
1194 if (start_slot > PIPE_MAX_SHADER_BUFFERS ||
1195 start_slot > PIPE_MAX_SHADER_BUFFERS - num_ssbo)
1196 return EINVAL;
1197
1198 for (uint32_t i = 0; i < num_ssbo; i++) {
1199 uint32_t offset = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_OFFSET(i));
1200 uint32_t buf_len = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_LENGTH(i));
1201 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_RES_HANDLE(i));
1202 vrend_set_single_ssbo(ctx, shader_type, start_slot + i, offset, buf_len,
1203 handle);
1204 }
1205 return 0;
1206 }
1207
1208 static int vrend_decode_set_atomic_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1209 {
1210 uint32_t num_abo;
1211 uint32_t start_slot;
1212
1213 if (length < 2)
1214 return EINVAL;
1215
1216 num_abo = (length - 1) / VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE;
1217 start_slot = get_buf_entry(buf, VIRGL_SET_ATOMIC_BUFFER_START_SLOT);
1218 if (num_abo < 1)
1219 return 0;
1220
1221 if (num_abo > PIPE_MAX_HW_ATOMIC_BUFFERS ||
1222 start_slot > PIPE_MAX_HW_ATOMIC_BUFFERS ||
1223 start_slot > PIPE_MAX_HW_ATOMIC_BUFFERS - num_abo)
1224 return EINVAL;
1225
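   /* Each buffer entry following the start_slot dword is an (offset, length,
    * resource handle) triple, matching the three dwords read per iteration
    * below. */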
1226 for (uint32_t i = 0; i < num_abo; i++) {
1227 uint32_t offset = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2);
1228 uint32_t buf_len = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3);
1229 uint32_t handle = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4);
1230 vrend_set_single_abo(ctx, start_slot + i, offset, buf_len, handle);
1231 }
1232
1233 return 0;
1234 }
1235
1236 static int vrend_decode_set_shader_images(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1237 {
1238 uint32_t num_images;
1239 uint32_t shader_type, start_slot;
1240 if (length < 2)
1241 return EINVAL;
1242
1243 num_images = (length - 2) / VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE;
1244 shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_SHADER_TYPE);
1245 start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_START_SLOT);
1246 if (shader_type >= PIPE_SHADER_TYPES)
1247 return EINVAL;
1248
1249 if (num_images < 1) {
1250 return 0;
1251 }
1252 if (start_slot > PIPE_MAX_SHADER_IMAGES ||
1253 start_slot > PIPE_MAX_SHADER_IMAGES - num_images)
1254 return EINVAL;
1255
1256 for (uint32_t i = 0; i < num_images; i++) {
1257 uint32_t format = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_FORMAT(i));
1258 uint32_t access = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_ACCESS(i));
1259 uint32_t layer_offset = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(i));
1260 uint32_t level_size = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(i));
1261 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_RES_HANDLE(i));
1262 vrend_set_single_image_view(ctx, shader_type, start_slot + i, format, access,
1263 layer_offset, level_size, handle);
1264 }
1265 return 0;
1266 }
1267
1268 static int vrend_decode_memory_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1269 {
1270 if (length != VIRGL_MEMORY_BARRIER_SIZE)
1271 return EINVAL;
1272
1273 unsigned flags = get_buf_entry(buf, VIRGL_MEMORY_BARRIER_FLAGS);
1274 vrend_memory_barrier(ctx, flags);
1275 return 0;
1276 }
1277
1278 static int vrend_decode_launch_grid(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1279 {
1280 uint32_t block[3], grid[3];
1281 uint32_t indirect_handle, indirect_offset;
1282 if (length != VIRGL_LAUNCH_GRID_SIZE)
1283 return EINVAL;
1284
1285 block[0] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_X);
1286 block[1] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Y);
1287 block[2] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Z);
1288 grid[0] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_X);
1289 grid[1] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Y);
1290 grid[2] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Z);
1291 indirect_handle = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_HANDLE);
1292 indirect_offset = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_OFFSET);
1293 vrend_launch_grid(ctx, block, grid, indirect_handle, indirect_offset);
1294 return 0;
1295 }
1296
1297 static int vrend_decode_set_streamout_targets(struct vrend_context *ctx,
1298 const uint32_t *buf, uint32_t length)
1299 {
1300 uint32_t handles[16];
1301 uint32_t num_handles = length - 1;
1302 uint32_t append_bitmask;
1303 uint i;
1304
1305 if (length < 1)
1306 return EINVAL;
1307 if (num_handles > ARRAY_SIZE(handles))
1308 return EINVAL;
1309
1310 append_bitmask = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK);
1311 for (i = 0; i < num_handles; i++)
1312 handles[i] = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_H0 + i);
1313 vrend_set_streamout_targets(ctx, append_bitmask, num_handles, handles);
1314 return 0;
1315 }
1316
1317 static int vrend_decode_texture_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1318 {
1319 if (length != VIRGL_TEXTURE_BARRIER_SIZE)
1320 return EINVAL;
1321
1322 unsigned flags = get_buf_entry(buf, VIRGL_TEXTURE_BARRIER_FLAGS);
1323 vrend_texture_barrier(ctx, flags);
1324 return 0;
1325 }
1326
1327 static int vrend_decode_set_debug_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1328 {
1329 char *flagstring;
1330 int slen = sizeof(uint32_t) * length;
1331
1332 if (length < VIRGL_SET_DEBUG_FLAGS_MIN_SIZE)
1333 return EINVAL;
1334
1335 const uint32_t *flag_buf = get_buf_ptr(buf, VIRGL_SET_DEBUG_FLAGSTRING_OFFSET);
1336 flagstring = malloc(slen+1);
1337
1338 if (!flagstring) {
1339 return ENOMEM;
1340 }
1341
1342 memcpy(flagstring, flag_buf, slen);
1343 flagstring[slen] = 0;
1344 vrend_context_set_debug_flags(ctx, flagstring);
1345
1346 free(flagstring);
1347
1348 return 0;
1349 }
1350
1351 static int vrend_decode_set_tweaks(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1352 {
1353 VREND_DEBUG(dbg_tweak, NULL, "Received TWEAK set command\n");
1354
1355 if (length < VIRGL_SET_TWEAKS_SIZE)
1356 return EINVAL;
1357
1358 uint32_t tweak_id = get_buf_entry(buf, VIRGL_SET_TWEAKS_ID);
1359 uint32_t tweak_value = get_buf_entry(buf, VIRGL_SET_TWEAKS_VALUE);
1360
1361 vrend_set_active_tweaks(vrend_get_context_tweaks(ctx), tweak_id, tweak_value);
1362 return 0;
1363 }
1364
1365
1366 static int vrend_decode_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1367 {
1368 struct pipe_box box;
1369 uint32_t dst_handle;
1370 struct vrend_transfer_info info;
1371
1372 if (length < VIRGL_TRANSFER3D_SIZE)
1373 return EINVAL;
1374
1375 memset(&info, 0, sizeof(info));
1376 info.box = &box;
1377 vrend_decode_transfer_common(buf, &dst_handle, &info);
1378 info.offset = get_buf_entry(buf, VIRGL_TRANSFER3D_DATA_OFFSET);
1379 int transfer_mode = get_buf_entry(buf, VIRGL_TRANSFER3D_DIRECTION);
1380
1381 if (transfer_mode != VIRGL_TRANSFER_TO_HOST &&
1382 transfer_mode != VIRGL_TRANSFER_FROM_HOST)
1383 return EINVAL;
1384
1385 return vrend_renderer_transfer_iov(ctx, dst_handle, &info,
1386 transfer_mode);
1387 }
1388
1389 static int vrend_decode_copy_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1390 {
1391 struct pipe_box box;
1392 struct vrend_transfer_info info;
1393 uint32_t dst_handle;
1394 uint32_t src_handle;
1395
1396 if (length != VIRGL_COPY_TRANSFER3D_SIZE)
1397 return EINVAL;
1398
1399 memset(&info, 0, sizeof(info));
1400 info.box = &box;
1401 vrend_decode_transfer_common(buf, &dst_handle, &info);
1402 info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
1403 info.synchronized = (get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SYNCHRONIZED) != 0);
1404
1405 src_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
1406
1407 return vrend_renderer_copy_transfer3d(ctx, dst_handle, src_handle,
1408 &info);
1409 }
1410
1411 static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1412 {
1413 struct vrend_renderer_resource_create_args args = { 0 };
1414 uint32_t blob_id;
1415
1416 if (length != VIRGL_PIPE_RES_CREATE_SIZE)
1417 return EINVAL;
1418
1419 args.target = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_TARGET);
1420 args.format = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FORMAT);
1421 args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BIND);
1422 args.width = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_WIDTH);
1423 args.height = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_HEIGHT);
1424 args.depth = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_DEPTH);
1425 args.array_size = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_ARRAY_SIZE);
1426 args.last_level = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_LAST_LEVEL);
1427 args.nr_samples = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_NR_SAMPLES);
1428 args.flags = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FLAGS);
1429 blob_id = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BLOB_ID);
1430
1431 return vrend_renderer_pipe_resource_create(ctx, blob_id, &args);
1432 }
1433
1434 static int vrend_decode_pipe_resource_set_type(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1435 {
1436 struct vrend_renderer_resource_set_type_args args = { 0 };
1437 uint32_t res_id;
1438
1439 if (length >= VIRGL_PIPE_RES_SET_TYPE_SIZE(0))
1440 args.plane_count = (length - VIRGL_PIPE_RES_SET_TYPE_SIZE(0)) / 2;
1441
1442 if (length != VIRGL_PIPE_RES_SET_TYPE_SIZE(args.plane_count) ||
1443 !args.plane_count || args.plane_count > VIRGL_GBM_MAX_PLANES)
1444 return EINVAL;
1445
1446 res_id = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE);
1447 args.format = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_FORMAT);
1448 args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_BIND);
1449 args.width = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_WIDTH);
1450 args.height = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_HEIGHT);
1451 args.usage = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_USAGE);
1452 args.modifier = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO);
1453 args.modifier |= (uint64_t)get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI) << 32;
1454 for (uint32_t i = 0; i < args.plane_count; i++) {
1455 args.plane_strides[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i));
1456 args.plane_offsets[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i));
1457 }
1458
1459 return vrend_renderer_pipe_resource_set_type(ctx, res_id, &args);
1460 }
1461
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
                                       uint32_t ctx_id);

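/* Bridge between the renderer's fence-retire callback (registered in
 * vrend_renderer_context_create() below) and the virgl_context interface:
 * retire_data is the decode context, and fences are reported on queue 0.
 */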
static void vrend_decode_ctx_fence_retire(void *fence_cookie,
                                          void *retire_data)
{
   struct vrend_decode_ctx *dctx = retire_data;
   dctx->base.fence_retire(&dctx->base, 0, fence_cookie);
}

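/* Create a decoder context: allocate the vrend_decode_ctx wrapper, fill in
 * the virgl_context callbacks, create the underlying vrend_context and hook
 * up fence retirement.
 */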
struct virgl_context *vrend_renderer_context_create(uint32_t handle,
                                                    uint32_t nlen,
                                                    const char *debug_name)
{
   struct vrend_decode_ctx *dctx;

   dctx = malloc(sizeof(struct vrend_decode_ctx));
   if (!dctx)
      return NULL;

   vrend_decode_ctx_init_base(dctx, handle);

   dctx->grctx = vrend_create_context(handle, nlen, debug_name);
   if (!dctx->grctx) {
      free(dctx);
      return NULL;
   }

   vrend_renderer_set_fence_retire(dctx->grctx,
                                   vrend_decode_ctx_fence_retire,
                                   dctx);

   return &dctx->base;
}

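/* Most of the virgl_context callbacks below simply unwrap the
 * vrend_decode_ctx and forward to the corresponding vrend_* entry point.
 */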
static void vrend_decode_ctx_destroy(struct virgl_context *ctx)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   vrend_destroy_context(dctx->grctx);
   free(dctx);
}

static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
                                             struct virgl_resource *res)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   vrend_renderer_attach_res_ctx(dctx->grctx, res);
}

static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
                                             struct virgl_resource *res)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   vrend_renderer_detach_res_ctx(dctx->grctx, res);
}

static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
                                        struct virgl_resource *res,
                                        const struct vrend_transfer_info *info,
                                        int transfer_mode)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   return vrend_renderer_transfer_iov(dctx->grctx, res->res_id, info,
                                      transfer_mode);
}

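/* Resolve a blob id (registered earlier via VIRGL_CCMD_PIPE_RESOURCE_CREATE)
 * into a pipe resource plus its map info.  No fd export is done here, so the
 * blob type is left as VIRGL_RESOURCE_FD_INVALID.
 */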
static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
                                     uint64_t blob_id,
                                     UNUSED uint32_t blob_flags,
                                     struct virgl_context_blob *blob)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   blob->type = VIRGL_RESOURCE_FD_INVALID;
   /* this transfers ownership and blob_id is no longer valid */
   blob->u.pipe_resource = vrend_get_blob_pipe(dctx->grctx, blob_id);
   if (!blob->u.pipe_resource)
      return -EINVAL;

   blob->map_info = vrend_renderer_resource_get_map_info(blob->u.pipe_resource);
   return 0;
}

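/* Decode VIRGL_CCMD_GET_MEMORY_INFO: the single payload dword names the
 * resource through which the renderer reports its memory information
 * (see vrend_renderer_get_meminfo()).
 */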
static int vrend_decode_get_memory_info(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
   TRACE_FUNC();
   if (length != 1)
      return EINVAL;

   uint32_t res_handle = get_buf_entry(buf, 1);

   vrend_renderer_get_meminfo(ctx, res_handle);

   return 0;
}

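/* All command decoders share this signature: buf points at the command
 * header dword, so payload dwords are read starting at index 1, and length
 * is the payload size in dwords (excluding the header).
 */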
typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);

static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
   (void)ctx;
   (void)buf;
   (void)length;
   return 0;
}

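/* Dispatch table indexed by the VIRGL_CCMD_* opcode.  Every slot is expected
 * to be populated (vrend_decode_ctx_init_base() asserts this in debug
 * builds), so the decode loop only range-checks the opcode and never has to
 * NULL-check the callback.
 */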
static const vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
   [VIRGL_CCMD_NOP] = vrend_decode_dummy,
   [VIRGL_CCMD_CREATE_OBJECT] = vrend_decode_create_object,
   [VIRGL_CCMD_BIND_OBJECT] = vrend_decode_bind_object,
   [VIRGL_CCMD_DESTROY_OBJECT] = vrend_decode_destroy_object,
   [VIRGL_CCMD_CLEAR] = vrend_decode_clear,
   [VIRGL_CCMD_CLEAR_TEXTURE] = vrend_decode_clear_texture,
   [VIRGL_CCMD_DRAW_VBO] = vrend_decode_draw_vbo,
   [VIRGL_CCMD_SET_FRAMEBUFFER_STATE] = vrend_decode_set_framebuffer_state,
   [VIRGL_CCMD_SET_VERTEX_BUFFERS] = vrend_decode_set_vertex_buffers,
   [VIRGL_CCMD_RESOURCE_INLINE_WRITE] = vrend_decode_resource_inline_write,
   [VIRGL_CCMD_SET_VIEWPORT_STATE] = vrend_decode_set_viewport_state,
   [VIRGL_CCMD_SET_SAMPLER_VIEWS] = vrend_decode_set_sampler_views,
   [VIRGL_CCMD_SET_INDEX_BUFFER] = vrend_decode_set_index_buffer,
   [VIRGL_CCMD_SET_CONSTANT_BUFFER] = vrend_decode_set_constant_buffer,
   [VIRGL_CCMD_SET_STENCIL_REF] = vrend_decode_set_stencil_ref,
   [VIRGL_CCMD_SET_BLEND_COLOR] = vrend_decode_set_blend_color,
   [VIRGL_CCMD_SET_SCISSOR_STATE] = vrend_decode_set_scissor_state,
   [VIRGL_CCMD_BLIT] = vrend_decode_blit,
   [VIRGL_CCMD_RESOURCE_COPY_REGION] = vrend_decode_resource_copy_region,
   [VIRGL_CCMD_BIND_SAMPLER_STATES] = vrend_decode_bind_sampler_states,
   [VIRGL_CCMD_BEGIN_QUERY] = vrend_decode_begin_query,
   [VIRGL_CCMD_END_QUERY] = vrend_decode_end_query,
   [VIRGL_CCMD_GET_QUERY_RESULT] = vrend_decode_get_query_result,
   [VIRGL_CCMD_SET_POLYGON_STIPPLE] = vrend_decode_set_polygon_stipple,
   [VIRGL_CCMD_SET_CLIP_STATE] = vrend_decode_set_clip_state,
   [VIRGL_CCMD_SET_SAMPLE_MASK] = vrend_decode_set_sample_mask,
   [VIRGL_CCMD_SET_MIN_SAMPLES] = vrend_decode_set_min_samples,
   [VIRGL_CCMD_SET_STREAMOUT_TARGETS] = vrend_decode_set_streamout_targets,
   [VIRGL_CCMD_SET_RENDER_CONDITION] = vrend_decode_set_render_condition,
   [VIRGL_CCMD_SET_UNIFORM_BUFFER] = vrend_decode_set_uniform_buffer,
   [VIRGL_CCMD_SET_SUB_CTX] = vrend_decode_set_sub_ctx,
   [VIRGL_CCMD_CREATE_SUB_CTX] = vrend_decode_create_sub_ctx,
   [VIRGL_CCMD_DESTROY_SUB_CTX] = vrend_decode_destroy_sub_ctx,
   [VIRGL_CCMD_BIND_SHADER] = vrend_decode_bind_shader,
   [VIRGL_CCMD_SET_TESS_STATE] = vrend_decode_set_tess_state,
   [VIRGL_CCMD_SET_SHADER_BUFFERS] = vrend_decode_set_shader_buffers,
   [VIRGL_CCMD_SET_SHADER_IMAGES] = vrend_decode_set_shader_images,
   [VIRGL_CCMD_SET_ATOMIC_BUFFERS] = vrend_decode_set_atomic_buffers,
   [VIRGL_CCMD_MEMORY_BARRIER] = vrend_decode_memory_barrier,
   [VIRGL_CCMD_LAUNCH_GRID] = vrend_decode_launch_grid,
   [VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH] = vrend_decode_set_framebuffer_state_no_attach,
   [VIRGL_CCMD_TEXTURE_BARRIER] = vrend_decode_texture_barrier,
   [VIRGL_CCMD_SET_DEBUG_FLAGS] = vrend_decode_set_debug_mask,
   [VIRGL_CCMD_GET_QUERY_RESULT_QBO] = vrend_decode_get_query_result_qbo,
   [VIRGL_CCMD_TRANSFER3D] = vrend_decode_transfer3d,
   [VIRGL_CCMD_COPY_TRANSFER3D] = vrend_decode_copy_transfer3d,
   [VIRGL_CCMD_END_TRANSFERS] = vrend_decode_dummy,
   [VIRGL_CCMD_SET_TWEAKS] = vrend_decode_set_tweaks,
   [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create,
   [VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE] = vrend_decode_pipe_resource_set_type,
   [VIRGL_CCMD_GET_MEMORY_INFO] = vrend_decode_get_memory_info,
};

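/* Walk a guest command buffer and dispatch each command through
 * decode_table.  Each command starts with a header dword whose low 8 bits
 * hold the VIRGL_CCMD_* opcode and whose high 16 bits hold the payload
 * length in dwords; a minimal sketch of the decoding done below:
 *
 *    uint32_t hdr = typed_buf[buf_offset];
 *    uint32_t len = hdr >> 16;    // payload dwords following the header
 *    uint32_t cmd = hdr & 0xff;   // VIRGL_CCMD_* opcode
 *
 * A command whose payload would run past the end of the submitted buffer is
 * reported as a buffer error and decoding stops.
 */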
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
                                       const void *buffer,
                                       size_t size)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *gdctx = (struct vrend_decode_ctx *)ctx;
   bool bret;
   int ret;

   bret = vrend_hw_switch_context(gdctx->grctx, true);
   if (bret == false)
      return EINVAL;

   const uint32_t *typed_buf = (const uint32_t *)buffer;
   const uint32_t buf_total = size / sizeof(uint32_t);
   uint32_t buf_offset = 0;

   while (buf_offset < buf_total) {
#ifndef NDEBUG
      const uint32_t cur_offset = buf_offset;
#endif

      const uint32_t *buf = &typed_buf[buf_offset];
      uint32_t len = *buf >> 16;
      uint32_t cmd = *buf & 0xff;

      if (cmd >= VIRGL_MAX_COMMANDS)
         return EINVAL;

      buf_offset += len + 1;

      ret = 0;
      /* check if the guest is doing something bad */
      if (buf_offset > buf_total) {
         vrend_report_buffer_error(gdctx->grctx, 0);
         break;
      }

      VREND_DEBUG(dbg_cmd, gdctx->grctx, "%-4d %-20s len:%d\n",
                  cur_offset, vrend_get_comand_name(cmd), len);

      TRACE_SCOPE_SLOW(vrend_get_comand_name(cmd));

      ret = decode_table[cmd](gdctx->grctx, buf, len);
      if (ret) {
         if (ret == EINVAL)
            vrend_report_buffer_error(gdctx->grctx, *buf);
         return ret;
      }
   }
   return 0;
}

static int vrend_decode_ctx_get_fencing_fd(UNUSED struct virgl_context *ctx)
{
   return vrend_renderer_get_poll_fd();
}

static void vrend_decode_ctx_retire_fences(UNUSED struct virgl_context *ctx)
{
   vrend_renderer_check_fences();
}

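/* Only the default fence queue is supported by this context type: a non-zero
 * queue_id is rejected, matching the hard-coded queue 0 used when fences are
 * retired in vrend_decode_ctx_fence_retire().
 */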
static int vrend_decode_ctx_submit_fence(struct virgl_context *ctx,
                                         uint32_t flags,
                                         uint64_t queue_id,
                                         void *fence_cookie)
{
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   if (queue_id)
      return -EINVAL;

   return vrend_renderer_create_fence(dctx->grctx, flags, fence_cookie);
}

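/* Fill in the virgl_context function table for a decode context.  The assert
 * loop (debug builds only) checks that decode_table has an entry for every
 * command id.
 */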
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
                                       uint32_t ctx_id)
{
   struct virgl_context *ctx = &dctx->base;

   for (unsigned i = 0; i < VIRGL_MAX_COMMANDS; ++i)
      assert(decode_table[i]);

   ctx->ctx_id = ctx_id;
   ctx->destroy = vrend_decode_ctx_destroy;
   ctx->attach_resource = vrend_decode_ctx_attach_resource;
   ctx->detach_resource = vrend_decode_ctx_detach_resource;
   ctx->transfer_3d = vrend_decode_ctx_transfer_3d;
   ctx->get_blob = vrend_decode_ctx_get_blob;
   ctx->get_blob_done = NULL;
   ctx->submit_cmd = vrend_decode_ctx_submit_cmd;

   ctx->get_fencing_fd = vrend_decode_ctx_get_fencing_fd;
   ctx->retire_fences = vrend_decode_ctx_retire_fences;
   ctx->submit_fence = vrend_decode_ctx_submit_fence;
}