1 /**************************************************************************
2 *
3 * Copyright (C) 2014 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 **************************************************************************/
24 #include <stdint.h>
25 #include <string.h>
26 #include <stdio.h>
27 #include <errno.h>
28 #include <epoxy/gl.h>
29 #include <fcntl.h>
30
31 #include "util/u_memory.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_state.h"
34 #include "pipe/p_shader_tokens.h"
35 #include "virgl_context.h"
36 #include "virgl_resource.h"
37 #include "vrend_renderer.h"
38 #include "vrend_object.h"
39 #include "tgsi/tgsi_text.h"
40 #include "vrend_debug.h"
41 #include "vrend_tweaks.h"
42 #include "virgl_util.h"
43
44 /* decode side */
45 #define DECODE_MAX_TOKENS 8000
46
47 struct vrend_decode_ctx {
48 struct virgl_context base;
49 struct vrend_context *grctx;
50 };
51
52 static inline uint32_t get_buf_entry(const uint32_t *buf, uint32_t offset)
53 {
54 return buf[offset];
55 }
56
57 static inline const void *get_buf_ptr(const uint32_t *buf, uint32_t offset)
58 {
59 return &buf[offset];
60 }
61
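/*
 * Illustrative sketch (not part of the wire-protocol definition): every
 * decoder below pulls dword-sized fields out of the command buffer with
 * these two helpers, using offsets defined by the VIRGL_* macros in the
 * protocol headers.  For a hypothetical command whose first payload dword
 * is a handle followed by raw data:
 *
 *    uint32_t handle  = get_buf_entry(buf, 1);
 *    const void *data = get_buf_ptr(buf, 2);
 */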
62 static int vrend_decode_create_shader(struct vrend_context *ctx,
63 const uint32_t *buf,
64 uint32_t handle,
65 uint16_t length)
66 {
67 struct pipe_stream_output_info so_info;
68 uint i;
69 int ret;
70 uint32_t shader_offset, req_local_mem = 0;
71 unsigned num_tokens, num_so_outputs, offlen;
72 const uint8_t *shd_text;
73 uint32_t type;
74
75 if (length < VIRGL_OBJ_SHADER_HDR_SIZE(0))
76 return EINVAL;
77
78 type = get_buf_entry(buf, VIRGL_OBJ_SHADER_TYPE);
79 num_tokens = get_buf_entry(buf, VIRGL_OBJ_SHADER_NUM_TOKENS);
80 offlen = get_buf_entry(buf, VIRGL_OBJ_SHADER_OFFSET);
81
82 if (type == PIPE_SHADER_COMPUTE) {
83 req_local_mem = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
84 num_so_outputs = 0;
85 } else {
86 num_so_outputs = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
87 if (length < VIRGL_OBJ_SHADER_HDR_SIZE(num_so_outputs))
88 return EINVAL;
89
90 if (num_so_outputs > PIPE_MAX_SO_OUTPUTS)
91 return EINVAL;
92 }
93
94 shader_offset = 6;
95 if (num_so_outputs) {
96 so_info.num_outputs = num_so_outputs;
97 if (so_info.num_outputs) {
98 for (i = 0; i < 4; i++)
99 so_info.stride[i] = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_STRIDE(i));
100 for (i = 0; i < so_info.num_outputs; i++) {
101 uint32_t tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
102
103 so_info.output[i].register_index = tmp & 0xff;
104 so_info.output[i].start_component = (tmp >> 8) & 0x3;
105 so_info.output[i].num_components = (tmp >> 10) & 0x7;
106 so_info.output[i].output_buffer = (tmp >> 13) & 0x7;
107 so_info.output[i].dst_offset = (tmp >> 16) & 0xffff;
108 tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(i));
109 so_info.output[i].stream = (tmp & 0x3);
110 so_info.output[i].need_temp = so_info.output[i].num_components < 4;
111 }
112
113 for (i = 0; i < so_info.num_outputs - 1; i++) {
114 for (unsigned j = i + 1; j < so_info.num_outputs; j++) {
115 so_info.output[j].need_temp |=
116 (so_info.output[i].register_index == so_info.output[j].register_index);
117 }
118 }
119 }
120 shader_offset += 4 + (2 * num_so_outputs);
121 } else
122 memset(&so_info, 0, sizeof(so_info));
123
124 shd_text = get_buf_ptr(buf, shader_offset);
125 ret = vrend_create_shader(ctx, handle, &so_info, req_local_mem, (const char *)shd_text, offlen, num_tokens, type, length - shader_offset + 1);
126
127 return ret;
128 }
129
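/*
 * Sketch of the CREATE_SHADER payload as consumed above (derived from the
 * code, not a normative protocol description): the header fields (type,
 * num_tokens, offset, and num_so_outputs or req_local_mem for compute) are
 * read through the VIRGL_OBJ_SHADER_* offsets; when stream-output
 * descriptors are present, 4 stride dwords plus 2 dwords per output follow,
 * so the shader text starts at dword 6 + 4 + 2 * num_so_outputs, otherwise
 * at dword 6.
 */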
130 static int vrend_decode_create_stream_output_target(struct vrend_context *ctx, const uint32_t *buf,
131 uint32_t handle, uint16_t length)
132 {
133 uint32_t res_handle, buffer_size, buffer_offset;
134
135 if (length != VIRGL_OBJ_STREAMOUT_SIZE)
136 return EINVAL;
137
138 res_handle = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_RES_HANDLE);
139 buffer_offset = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET);
140 buffer_size = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_SIZE);
141
142 return vrend_create_so_target(ctx, handle, res_handle, buffer_offset,
143 buffer_size);
144 }
145
146 static int vrend_decode_set_framebuffer_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
147 {
148 if (length < 2)
149 return EINVAL;
150
151 uint32_t nr_cbufs = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS);
152 uint32_t zsurf_handle = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE);
153 uint32_t surf_handle[8];
154 uint32_t i;
155
156 if (length != (2u + nr_cbufs))
157 return EINVAL;
158
159 if (nr_cbufs > 8)
160 return EINVAL;
161
162 for (i = 0; i < nr_cbufs; i++)
163 surf_handle[i] = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(i));
164 vrend_set_framebuffer_state(ctx, nr_cbufs, surf_handle, zsurf_handle);
165 return 0;
166 }
167
168 static int vrend_decode_set_framebuffer_state_no_attach(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
169 {
170 uint32_t width, height;
171 uint32_t layers, samples;
172 uint32_t tmp;
173
174 if (length != VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE)
175 return EINVAL;
176
177 tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT);
178 width = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH(tmp);
179 height = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_HEIGHT(tmp);
180
181 tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES);
182 layers = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS(tmp);
183 samples = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SAMPLES(tmp);
184
185 vrend_set_framebuffer_state_no_attach(ctx, width, height, layers, samples);
186 return 0;
187 }
188
189 static int vrend_decode_clear(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
190 {
191 union pipe_color_union color;
192 double depth;
193 unsigned stencil, buffers;
194 int i;
195
196 if (length != VIRGL_OBJ_CLEAR_SIZE)
197 return EINVAL;
198 buffers = get_buf_entry(buf, VIRGL_OBJ_CLEAR_BUFFERS);
199 for (i = 0; i < 4; i++)
200 color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_CLEAR_COLOR_0 + i);
201 const void *depth_ptr = get_buf_ptr(buf, VIRGL_OBJ_CLEAR_DEPTH_0);
202 memcpy(&depth, depth_ptr, sizeof(double));
203 stencil = get_buf_entry(buf, VIRGL_OBJ_CLEAR_STENCIL);
204
205 vrend_clear(ctx, buffers, &color, depth, stencil);
206 return 0;
207 }
208
209 static int vrend_decode_clear_texture(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
210 {
211 struct pipe_box box;
212 uint32_t handle;
213 uint32_t level;
214 uint32_t arr[4] = {0};
215
216 if (length != VIRGL_CLEAR_TEXTURE_SIZE)
217 return EINVAL;
218
219 handle = get_buf_entry(buf, VIRGL_TEXTURE_HANDLE);
220 level = get_buf_entry(buf, VIRGL_TEXTURE_LEVEL);
221 box.x = get_buf_entry(buf, VIRGL_TEXTURE_SRC_X);
222 box.y = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Y);
223 box.z = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Z);
224 box.width = get_buf_entry(buf, VIRGL_TEXTURE_SRC_W);
225 box.height = get_buf_entry(buf, VIRGL_TEXTURE_SRC_H);
226 box.depth = get_buf_entry(buf, VIRGL_TEXTURE_SRC_D);
227 arr[0] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_A);
228 arr[1] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_B);
229 arr[2] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_C);
230 arr[3] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_D);
231
232 vrend_clear_texture(ctx, handle, level, &box, (void *) &arr);
233 return 0;
234 }
235
236 static float uif(unsigned int ui)
237 {
238 union { float f; unsigned int ui; } myuif;
239 myuif.ui = ui;
240 return myuif.f;
241 }
242
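/*
 * uif() reinterprets a command-stream dword as an IEEE-754 float; type
 * punning through a union is well-defined in C.  For example,
 * uif(0x3f800000) == 1.0f and uif(0) == 0.0f.
 */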
243 static int vrend_decode_set_viewport_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
244 {
245 struct pipe_viewport_state vps[PIPE_MAX_VIEWPORTS];
246 uint i, v;
247 uint32_t num_viewports, start_slot;
248 if (length < 1)
249 return EINVAL;
250
251 if ((length - 1) % 6)
252 return EINVAL;
253
254 num_viewports = (length - 1) / 6;
255 start_slot = get_buf_entry(buf, VIRGL_SET_VIEWPORT_START_SLOT);
256
257 if (num_viewports > PIPE_MAX_VIEWPORTS ||
258 start_slot > (PIPE_MAX_VIEWPORTS - num_viewports))
259 return EINVAL;
260
261 for (v = 0; v < num_viewports; v++) {
262 for (i = 0; i < 3; i++)
263 vps[v].scale[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_SCALE_0(v) + i));
264 for (i = 0; i < 3; i++)
265 vps[v].translate[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(v) + i));
266 }
267
268 vrend_set_viewport_states(ctx, start_slot, num_viewports, vps);
269 return 0;
270 }
271
272 static int vrend_decode_set_index_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
273 {
274 if (length != 1 && length != 3)
275 return EINVAL;
276 vrend_set_index_buffer(ctx,
277 get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_HANDLE),
278 (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_INDEX_SIZE) : 0,
279 (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_OFFSET) : 0);
280 return 0;
281 }
282
283 static int vrend_decode_set_constant_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
284 {
285 uint32_t shader;
286 int nc = (length - 2);
287
288 if (length < 2)
289 return EINVAL;
290
291 shader = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
292 /* VIRGL_SET_CONSTANT_BUFFER_INDEX is not used */
293
294 if (shader >= PIPE_SHADER_TYPES)
295 return EINVAL;
296
297 vrend_set_constants(ctx, shader, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
298 return 0;
299 }
300
301 static int vrend_decode_set_uniform_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
302 {
303 if (length != VIRGL_SET_UNIFORM_BUFFER_SIZE)
304 return EINVAL;
305
306 uint32_t shader = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
307 uint32_t index = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_INDEX);
308 uint32_t offset = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
309 uint32_t blength = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
310 uint32_t handle = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
311
312 if (shader >= PIPE_SHADER_TYPES)
313 return EINVAL;
314
315 if (index >= PIPE_MAX_CONSTANT_BUFFERS)
316 return EINVAL;
317
318 vrend_set_uniform_buffer(ctx, shader, index, offset, blength, handle);
319 return 0;
320 }
321
322 static int vrend_decode_set_vertex_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
323 {
324 int num_vbo;
325 int i;
326
327 /* must be a multiple of 3 */
328 if (length && (length % 3))
329 return EINVAL;
330
331 num_vbo = (length / 3);
332 if (num_vbo > PIPE_MAX_ATTRIBS)
333 return EINVAL;
334
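/* Each slot is encoded as three consecutive dwords (stride, offset and
 * resource handle), hence the multiple-of-3 length check above. */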
335 for (i = 0; i < num_vbo; i++) {
336 vrend_set_single_vbo(ctx, i,
337 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_STRIDE(i)),
338 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_OFFSET(i)),
339 get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_HANDLE(i)));
340 }
341 vrend_set_num_vbo(ctx, num_vbo);
342 return 0;
343 }
344
345 static int vrend_decode_set_sampler_views(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
346 {
347 uint32_t num_samps;
348 uint32_t i;
349 uint32_t shader_type;
350 uint32_t start_slot;
351
352 if (length < 2)
353 return EINVAL;
354 num_samps = length - 2;
355 shader_type = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE);
356 start_slot = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_START_SLOT);
357
358 if (shader_type >= PIPE_SHADER_TYPES)
359 return EINVAL;
360
361 if (num_samps > PIPE_MAX_SHADER_SAMPLER_VIEWS ||
362 start_slot > (PIPE_MAX_SHADER_SAMPLER_VIEWS - num_samps))
363 return EINVAL;
364
365 for (i = 0; i < num_samps; i++) {
366 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE + i);
367 vrend_set_single_sampler_view(ctx, shader_type, i + start_slot, handle);
368 }
369 vrend_set_num_sampler_views(ctx, shader_type, start_slot, num_samps);
370 return 0;
371 }
372
373 static void vrend_decode_transfer_common(const uint32_t *buf,
374 uint32_t *dst_handle,
375 struct vrend_transfer_info *info)
376 {
377 *dst_handle = get_buf_entry(buf, VIRGL_RESOURCE_IW_RES_HANDLE);
378
379 info->level = get_buf_entry(buf, VIRGL_RESOURCE_IW_LEVEL);
380 info->stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_STRIDE);
381 info->layer_stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_LAYER_STRIDE);
382 info->box->x = get_buf_entry(buf, VIRGL_RESOURCE_IW_X);
383 info->box->y = get_buf_entry(buf, VIRGL_RESOURCE_IW_Y);
384 info->box->z = get_buf_entry(buf, VIRGL_RESOURCE_IW_Z);
385 info->box->width = get_buf_entry(buf, VIRGL_RESOURCE_IW_W);
386 info->box->height = get_buf_entry(buf, VIRGL_RESOURCE_IW_H);
387 info->box->depth = get_buf_entry(buf, VIRGL_RESOURCE_IW_D);
388 }
389
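/*
 * The common transfer header decoded above (resource handle, level, stride,
 * layer_stride and a 3D box) is shared by RESOURCE_INLINE_WRITE, TRANSFER3D
 * and COPY_TRANSFER3D; each caller then appends its own trailing fields
 * (inline data, offset and direction, or the source resource handle).
 */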
390 static int vrend_decode_resource_inline_write(struct vrend_context *ctx, const uint32_t *buf,
391 uint32_t length)
392 {
393 struct pipe_box box;
394 uint32_t dst_handle;
395 struct vrend_transfer_info info;
396 uint32_t data_len;
397 struct iovec dataiovec;
398 const void *data;
399
400 if (length < 12)
401 return EINVAL;
402
403 memset(&info, 0, sizeof(info));
404 info.box = &box;
405 vrend_decode_transfer_common(buf, &dst_handle, &info);
406 data_len = (length - 11) * 4;
407 data = get_buf_ptr(buf, VIRGL_RESOURCE_IW_DATA_START);
408
409 info.offset = 0;
410
411 dataiovec.iov_base = (void*)data;
412 dataiovec.iov_len = data_len;
413
414 info.iovec = &dataiovec;
415 info.iovec_cnt = 1;
416 return vrend_transfer_inline_write(ctx, dst_handle, &info);
417 }
418
419 static int vrend_decode_draw_vbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
420 {
421 struct pipe_draw_info info;
422 uint32_t cso;
423 uint32_t handle = 0, indirect_draw_count_handle = 0;
424 if (length != VIRGL_DRAW_VBO_SIZE && length != VIRGL_DRAW_VBO_SIZE_TESS &&
425 length != VIRGL_DRAW_VBO_SIZE_INDIRECT)
426 return EINVAL;
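/* The three accepted sizes are the base layout, the tessellation variant
 * (which adds vertices_per_patch and drawid) and the indirect variant
 * (which adds the indirect handle, offset, stride and draw-count fields). */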
427 memset(&info, 0, sizeof(struct pipe_draw_info));
428
429 info.start = get_buf_entry(buf, VIRGL_DRAW_VBO_START);
430 info.count = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT);
431 info.mode = get_buf_entry(buf, VIRGL_DRAW_VBO_MODE);
432 info.indexed = !!get_buf_entry(buf, VIRGL_DRAW_VBO_INDEXED);
433 info.instance_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INSTANCE_COUNT);
434 info.index_bias = get_buf_entry(buf, VIRGL_DRAW_VBO_INDEX_BIAS);
435 info.start_instance = get_buf_entry(buf, VIRGL_DRAW_VBO_START_INSTANCE);
436 info.primitive_restart = !!get_buf_entry(buf, VIRGL_DRAW_VBO_PRIMITIVE_RESTART);
437 info.restart_index = get_buf_entry(buf, VIRGL_DRAW_VBO_RESTART_INDEX);
438 info.min_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MIN_INDEX);
439 info.max_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MAX_INDEX);
440
441 if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
442 info.vertices_per_patch = get_buf_entry(buf, VIRGL_DRAW_VBO_VERTICES_PER_PATCH);
443 info.drawid = get_buf_entry(buf, VIRGL_DRAW_VBO_DRAWID);
444 }
445
446 if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
447 handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_HANDLE);
448 info.indirect.offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_OFFSET);
449 info.indirect.stride = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_STRIDE);
450 info.indirect.draw_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT);
451 info.indirect.indirect_draw_count_offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET);
452 indirect_draw_count_handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE);
453 }
454
455 cso = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT_FROM_SO);
456
457 return vrend_draw_vbo(ctx, &info, cso, handle, indirect_draw_count_handle);
458 }
459
460 static int vrend_decode_create_blend(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
461 {
462 struct pipe_blend_state *blend_state;
463 uint32_t tmp;
464 int i;
465
466 if (length != VIRGL_OBJ_BLEND_SIZE) {
467 return EINVAL;
468 }
469
470 blend_state = CALLOC_STRUCT(pipe_blend_state);
471 if (!blend_state)
472 return ENOMEM;
473
474 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S0);
475 blend_state->independent_blend_enable = (tmp & 1);
476 blend_state->logicop_enable = (tmp >> 1) & 0x1;
477 blend_state->dither = (tmp >> 2) & 0x1;
478 blend_state->alpha_to_coverage = (tmp >> 3) & 0x1;
479 blend_state->alpha_to_one = (tmp >> 4) & 0x1;
480
481 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S1);
482 blend_state->logicop_func = tmp & 0xf;
483
484 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
485 tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S2(i));
486 blend_state->rt[i].blend_enable = tmp & 0x1;
487 blend_state->rt[i].rgb_func = (tmp >> 1) & 0x7;
488 blend_state->rt[i].rgb_src_factor = (tmp >> 4) & 0x1f;
489 blend_state->rt[i].rgb_dst_factor = (tmp >> 9) & 0x1f;
490 blend_state->rt[i].alpha_func = (tmp >> 14) & 0x7;
491 blend_state->rt[i].alpha_src_factor = (tmp >> 17) & 0x1f;
492 blend_state->rt[i].alpha_dst_factor = (tmp >> 22) & 0x1f;
493 blend_state->rt[i].colormask = (tmp >> 27) & 0xf;
494 }
495
496 tmp = vrend_renderer_object_insert(ctx, blend_state, handle,
497 VIRGL_OBJECT_BLEND);
498 if (tmp == 0) {
499 FREE(blend_state);
500 return ENOMEM;
501 }
502 return 0;
503 }
504
505 static int vrend_decode_create_dsa(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
506 {
507 int i;
508 struct pipe_depth_stencil_alpha_state *dsa_state;
509 uint32_t tmp;
510
511 if (length != VIRGL_OBJ_DSA_SIZE)
512 return EINVAL;
513
514 dsa_state = CALLOC_STRUCT(pipe_depth_stencil_alpha_state);
515 if (!dsa_state)
516 return ENOMEM;
517
518 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S0);
519 dsa_state->depth.enabled = tmp & 0x1;
520 dsa_state->depth.writemask = (tmp >> 1) & 0x1;
521 dsa_state->depth.func = (tmp >> 2) & 0x7;
522
523 dsa_state->alpha.enabled = (tmp >> 8) & 0x1;
524 dsa_state->alpha.func = (tmp >> 9) & 0x7;
525
526 for (i = 0; i < 2; i++) {
527 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S1 + i);
528 dsa_state->stencil[i].enabled = tmp & 0x1;
529 dsa_state->stencil[i].func = (tmp >> 1) & 0x7;
530 dsa_state->stencil[i].fail_op = (tmp >> 4) & 0x7;
531 dsa_state->stencil[i].zpass_op = (tmp >> 7) & 0x7;
532 dsa_state->stencil[i].zfail_op = (tmp >> 10) & 0x7;
533 dsa_state->stencil[i].valuemask = (tmp >> 13) & 0xff;
534 dsa_state->stencil[i].writemask = (tmp >> 21) & 0xff;
535 }
536
537 tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_ALPHA_REF);
538 dsa_state->alpha.ref_value = uif(tmp);
539
540 tmp = vrend_renderer_object_insert(ctx, dsa_state, handle,
541 VIRGL_OBJECT_DSA);
542 if (tmp == 0) {
543 FREE(dsa_state);
544 return ENOMEM;
545 }
546 return 0;
547 }
548
549 static int vrend_decode_create_rasterizer(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
550 {
551 struct pipe_rasterizer_state *rs_state;
552 uint32_t tmp;
553
554 if (length != VIRGL_OBJ_RS_SIZE)
555 return EINVAL;
556
557 rs_state = CALLOC_STRUCT(pipe_rasterizer_state);
558 if (!rs_state)
559 return ENOMEM;
560
561 tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S0);
562 #define ebit(name, bit) rs_state->name = (tmp >> bit) & 0x1
563 #define emask(name, bit, mask) rs_state->name = (tmp >> bit) & mask
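/* For example, ebit(flatshade, 0) expands to
 *    rs_state->flatshade = (tmp >> 0) & 0x1;
 * and emask(cull_face, 8, 0x3) expands to
 *    rs_state->cull_face = (tmp >> 8) & 0x3;
 */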
564
565 ebit(flatshade, 0);
566 ebit(depth_clip, 1);
567 ebit(clip_halfz, 2);
568 ebit(rasterizer_discard, 3);
569 ebit(flatshade_first, 4);
570 ebit(light_twoside, 5);
571 ebit(sprite_coord_mode, 6);
572 ebit(point_quad_rasterization, 7);
573 emask(cull_face, 8, 0x3);
574 emask(fill_front, 10, 0x3);
575 emask(fill_back, 12, 0x3);
576 ebit(scissor, 14);
577 ebit(front_ccw, 15);
578 ebit(clamp_vertex_color, 16);
579 ebit(clamp_fragment_color, 17);
580 ebit(offset_line, 18);
581 ebit(offset_point, 19);
582 ebit(offset_tri, 20);
583 ebit(poly_smooth, 21);
584 ebit(poly_stipple_enable, 22);
585 ebit(point_smooth, 23);
586 ebit(point_size_per_vertex, 24);
587 ebit(multisample, 25);
588 ebit(line_smooth, 26);
589 ebit(line_stipple_enable, 27);
590 ebit(line_last_pixel, 28);
591 ebit(half_pixel_center, 29);
592 ebit(bottom_edge_rule, 30);
593 ebit(force_persample_interp, 31);
594 rs_state->point_size = uif(get_buf_entry(buf, VIRGL_OBJ_RS_POINT_SIZE));
595 rs_state->sprite_coord_enable = get_buf_entry(buf, VIRGL_OBJ_RS_SPRITE_COORD_ENABLE);
596 tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S3);
597 emask(line_stipple_pattern, 0, 0xffff);
598 emask(line_stipple_factor, 16, 0xff);
599 emask(clip_plane_enable, 24, 0xff);
600
601 rs_state->line_width = uif(get_buf_entry(buf, VIRGL_OBJ_RS_LINE_WIDTH));
602 rs_state->offset_units = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_UNITS));
603 rs_state->offset_scale = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_SCALE));
604 rs_state->offset_clamp = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_CLAMP));
605
606 tmp = vrend_renderer_object_insert(ctx, rs_state, handle,
607 VIRGL_OBJECT_RASTERIZER);
608 if (tmp == 0) {
609 FREE(rs_state);
610 return ENOMEM;
611 }
612 return 0;
613 }
614
615 static int vrend_decode_create_surface_common(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint32_t sample_count)
616 {
617 uint32_t res_handle, format, val0, val1;
618
619 res_handle = get_buf_entry(buf, VIRGL_OBJ_SURFACE_RES_HANDLE);
620 format = get_buf_entry(buf, VIRGL_OBJ_SURFACE_FORMAT);
621 /* decide later if these are texture or buffer */
622 val0 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT);
623 val1 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT);
624
625 return vrend_create_surface(ctx, handle, res_handle, format, val0, val1, sample_count);
626 }
627
628 static int vrend_decode_create_surface(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
629 {
630 if (length != VIRGL_OBJ_SURFACE_SIZE)
631 return EINVAL;
632
633 return vrend_decode_create_surface_common(ctx, buf, handle, 0);
634 }
635
636 static int vrend_decode_create_msaa_surface(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
637 {
638 if (length != VIRGL_OBJ_MSAA_SURFACE_SIZE)
639 return EINVAL;
640
641 uint32_t sample_count = get_buf_entry(buf, VIRGL_OBJ_SURFACE_SAMPLE_COUNT);
642 return vrend_decode_create_surface_common(ctx, buf, handle, sample_count);
643 }
644
645 static int vrend_decode_create_sampler_view(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
646 {
647 uint32_t res_handle, format, val0, val1, swizzle_packed;
648
649 if (length != VIRGL_OBJ_SAMPLER_VIEW_SIZE)
650 return EINVAL;
651
652 res_handle = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE);
653 format = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_FORMAT);
654 val0 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT);
655 val1 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT);
656 swizzle_packed = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE);
657 return vrend_create_sampler_view(ctx, handle, res_handle, format, val0, val1,swizzle_packed);
658 }
659
660 static int vrend_decode_create_sampler_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
661 {
662 struct pipe_sampler_state state;
663 int i;
664 uint32_t tmp;
665
666 if (length != VIRGL_OBJ_SAMPLER_STATE_SIZE)
667 return EINVAL;
668 tmp = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_S0);
669 state.wrap_s = tmp & 0x7;
670 state.wrap_t = (tmp >> 3) & 0x7;
671 state.wrap_r = (tmp >> 6) & 0x7;
672 state.min_img_filter = (tmp >> 9) & 0x3;
673 state.min_mip_filter = (tmp >> 11) & 0x3;
674 state.mag_img_filter = (tmp >> 13) & 0x3;
675 state.compare_mode = (tmp >> 15) & 0x1;
676 state.compare_func = (tmp >> 16) & 0x7;
677 state.seamless_cube_map = (tmp >> 19) & 0x1;
678 state.max_anisotropy = (float)((tmp >> 20) & 0x3f);
679
680 state.lod_bias = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS));
681 state.min_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MIN_LOD));
682 state.max_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MAX_LOD));
683
684 for (i = 0; i < 4; i++)
685 state.border_color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(i));
686
687 if (state.min_mip_filter != PIPE_TEX_MIPFILTER_NONE &&
688 state.min_mip_filter != PIPE_TEX_MIPFILTER_LINEAR &&
689 state.min_mip_filter != PIPE_TEX_MIPFILTER_NEAREST)
690 return EINVAL;
691
692 return vrend_create_sampler_state(ctx, handle, &state);
693 }
694
695 static int vrend_decode_create_ve(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
696 {
697 struct pipe_vertex_element *ve = NULL;
698 int num_elements;
699 int i;
700 int ret;
701
702 if (length < 1)
703 return EINVAL;
704
705 if ((length - 1) % 4)
706 return EINVAL;
707
708 num_elements = (length - 1) / 4;
709
710 if (num_elements) {
711 ve = calloc(num_elements, sizeof(struct pipe_vertex_element));
712
713 if (!ve)
714 return ENOMEM;
715
716 for (i = 0; i < num_elements; i++) {
717 ve[i].src_offset = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(i));
718 ve[i].instance_divisor = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(i));
719 ve[i].vertex_buffer_index = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(i));
720
721 if (ve[i].vertex_buffer_index >= PIPE_MAX_ATTRIBS) {
722 FREE(ve);
723 return EINVAL;
724 }
725
726 ve[i].src_format = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(i));
727 }
728 }
729
730 ret = vrend_create_vertex_elements_state(ctx, handle, num_elements, ve);
731
732 FREE(ve);
733 return ret;
734 }
735
736 static int vrend_decode_create_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
737 {
738 uint32_t query_type;
739 uint32_t query_index;
740 uint32_t res_handle;
741 uint32_t offset;
742 uint32_t tmp;
743
744 if (length != VIRGL_OBJ_QUERY_SIZE)
745 return EINVAL;
746
747 tmp = get_buf_entry(buf, VIRGL_OBJ_QUERY_TYPE_INDEX);
748 query_type = VIRGL_OBJ_QUERY_TYPE(tmp);
749 query_index = (tmp >> 16) & 0xffff;
750
751 offset = get_buf_entry(buf, VIRGL_OBJ_QUERY_OFFSET);
752 res_handle = get_buf_entry(buf, VIRGL_OBJ_QUERY_RES_HANDLE);
753
754 return vrend_create_query(ctx, handle, query_type, query_index, res_handle, offset);
755 }
756
757 static int vrend_decode_create_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
758 {
759 if (length < 1)
760 return EINVAL;
761
762 uint32_t header = get_buf_entry(buf, VIRGL_OBJ_CREATE_HEADER);
763 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_CREATE_HANDLE);
764 uint8_t obj_type = (header >> 8) & 0xff;
765 int ret = 0;
766
767 if (handle == 0)
768 return EINVAL;
769
770 VREND_DEBUG(dbg_object, ctx," CREATE %-18s handle:0x%x len:%d\n",
771 vrend_get_object_type_name(obj_type), handle, length);
772
773 TRACE_SCOPE(vrend_get_object_type_name(obj_type));
774
775 switch (obj_type){
776 case VIRGL_OBJECT_BLEND:
777 ret = vrend_decode_create_blend(ctx, buf, handle, length);
778 break;
779 case VIRGL_OBJECT_DSA:
780 ret = vrend_decode_create_dsa(ctx, buf, handle, length);
781 break;
782 case VIRGL_OBJECT_RASTERIZER:
783 ret = vrend_decode_create_rasterizer(ctx, buf, handle, length);
784 break;
785 case VIRGL_OBJECT_SHADER:
786 ret = vrend_decode_create_shader(ctx, buf, handle, length);
787 break;
788 case VIRGL_OBJECT_VERTEX_ELEMENTS:
789 ret = vrend_decode_create_ve(ctx, buf, handle, length);
790 break;
791 case VIRGL_OBJECT_SURFACE:
792 ret = vrend_decode_create_surface(ctx, buf, handle, length);
793 break;
794 case VIRGL_OBJECT_SAMPLER_VIEW:
795 ret = vrend_decode_create_sampler_view(ctx, buf, handle, length);
796 break;
797 case VIRGL_OBJECT_SAMPLER_STATE:
798 ret = vrend_decode_create_sampler_state(ctx, buf, handle, length);
799 break;
800 case VIRGL_OBJECT_QUERY:
801 ret = vrend_decode_create_query(ctx, buf, handle, length);
802 break;
803 case VIRGL_OBJECT_STREAMOUT_TARGET:
804 ret = vrend_decode_create_stream_output_target(ctx, buf, handle, length);
805 break;
806 case VIRGL_OBJECT_MSAA_SURFACE:
807 ret = vrend_decode_create_msaa_surface(ctx, buf, handle, length);
808 break;
809 default:
810 return EINVAL;
811 }
812
813 return ret;
814 }
815
816 static int vrend_decode_bind_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
817 {
818 if (length != 1)
819 return EINVAL;
820
821 uint32_t header = get_buf_entry(buf, VIRGL_OBJ_BIND_HEADER);
822 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_BIND_HANDLE);
823 uint8_t obj_type = (header >> 8) & 0xff;
824
825 VREND_DEBUG(dbg_object, ctx,
826 " BIND %-20s handle:0x%x len:%d\n",
827 vrend_get_object_type_name(obj_type), handle, length);
828
829 switch (obj_type) {
830 case VIRGL_OBJECT_BLEND:
831 vrend_object_bind_blend(ctx, handle);
832 break;
833 case VIRGL_OBJECT_DSA:
834 vrend_object_bind_dsa(ctx, handle);
835 break;
836 case VIRGL_OBJECT_RASTERIZER:
837 vrend_object_bind_rasterizer(ctx, handle);
838 break;
839 case VIRGL_OBJECT_VERTEX_ELEMENTS:
840 vrend_bind_vertex_elements_state(ctx, handle);
841 break;
842 default:
843 return EINVAL;
844 }
845
846 return 0;
847 }
848
849 static int vrend_decode_destroy_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
850 {
851 if (length != 1)
852 return EINVAL;
853
854 uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_DESTROY_HANDLE);
855
856 VREND_DEBUG_EXT(dbg_object, ctx,
857 uint32_t obj = (get_buf_entry(buf, 0) >> 8) & 0xFF;
858 vrend_printf(" DESTROY %-17s handle:0x%x\n",
859 vrend_get_object_type_name(obj), handle));
860
861 vrend_renderer_object_destroy(ctx, handle);
862 return 0;
863 }
864
865 static int vrend_decode_set_stencil_ref(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
866 {
867 if (length != VIRGL_SET_STENCIL_REF_SIZE)
868 return EINVAL;
869
870 struct pipe_stencil_ref ref;
871 uint32_t val = get_buf_entry(buf, VIRGL_SET_STENCIL_REF);
872
873 ref.ref_value[0] = val & 0xff;
874 ref.ref_value[1] = (val >> 8) & 0xff;
875 vrend_set_stencil_ref(ctx, &ref);
876 return 0;
877 }
878
879 static int vrend_decode_set_blend_color(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
880 {
881 struct pipe_blend_color color;
882 int i;
883
884 if (length != VIRGL_SET_BLEND_COLOR_SIZE)
885 return EINVAL;
886
887 for (i = 0; i < 4; i++)
888 color.color[i] = uif(get_buf_entry(buf, VIRGL_SET_BLEND_COLOR(i)));
889
890 vrend_set_blend_color(ctx, &color);
891 return 0;
892 }
893
894 static int vrend_decode_set_scissor_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
895 {
896 struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS];
897 uint32_t temp;
898 int32_t num_scissor;
899 uint32_t start_slot;
900 int s;
901 if (length < 1)
902 return EINVAL;
903
904 if ((length - 1) % 2)
905 return EINVAL;
906
907 num_scissor = (length - 1) / 2;
908 if (num_scissor > PIPE_MAX_VIEWPORTS)
909 return EINVAL;
910
911 start_slot = get_buf_entry(buf, VIRGL_SET_SCISSOR_START_SLOT);
912
913 for (s = 0; s < num_scissor; s++) {
914 temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MINX_MINY(s));
915 ss[s].minx = temp & 0xffff;
916 ss[s].miny = (temp >> 16) & 0xffff;
917
918 temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MAXX_MAXY(s));
919 ss[s].maxx = temp & 0xffff;
920 ss[s].maxy = (temp >> 16) & 0xffff;
921 }
922
923 vrend_set_scissor_state(ctx, start_slot, num_scissor, ss);
924 return 0;
925 }
926
927 static int vrend_decode_set_polygon_stipple(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
928 {
929 struct pipe_poly_stipple ps;
930 int i;
931
932 if (length != VIRGL_POLYGON_STIPPLE_SIZE)
933 return EINVAL;
934
935 for (i = 0; i < 32; i++)
936 ps.stipple[i] = get_buf_entry(buf, VIRGL_POLYGON_STIPPLE_P0 + i);
937
938 vrend_set_polygon_stipple(ctx, &ps);
939 return 0;
940 }
941
942 static int vrend_decode_set_clip_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
943 {
944 struct pipe_clip_state clip;
945 int i, j;
946
947 if (length != VIRGL_SET_CLIP_STATE_SIZE)
948 return EINVAL;
949
950 for (i = 0; i < 8; i++)
951 for (j = 0; j < 4; j++)
952 clip.ucp[i][j] = uif(get_buf_entry(buf, VIRGL_SET_CLIP_STATE_C0 + (i * 4) + j));
953 vrend_set_clip_state(ctx, &clip);
954 return 0;
955 }
956
957 static int vrend_decode_set_sample_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
958 {
959 unsigned mask;
960
961 if (length != VIRGL_SET_SAMPLE_MASK_SIZE)
962 return EINVAL;
963 mask = get_buf_entry(buf, VIRGL_SET_SAMPLE_MASK_MASK);
964 vrend_set_sample_mask(ctx, mask);
965 return 0;
966 }
967
968 static int vrend_decode_set_min_samples(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
969 {
970 unsigned min_samples;
971
972 if (length != VIRGL_SET_MIN_SAMPLES_SIZE)
973 return EINVAL;
974 min_samples = get_buf_entry(buf, VIRGL_SET_MIN_SAMPLES_MASK);
975 vrend_set_min_samples(ctx, min_samples);
976 return 0;
977 }
978
979 static int vrend_decode_resource_copy_region(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
980 {
981 struct pipe_box box;
982 uint32_t dst_handle, src_handle;
983 uint32_t dst_level, dstx, dsty, dstz;
984 uint32_t src_level;
985
986 if (length != VIRGL_CMD_RESOURCE_COPY_REGION_SIZE)
987 return EINVAL;
988
989 dst_handle = get_buf_entry(buf, VIRGL_CMD_RCR_DST_RES_HANDLE);
990 dst_level = get_buf_entry(buf, VIRGL_CMD_RCR_DST_LEVEL);
991 dstx = get_buf_entry(buf, VIRGL_CMD_RCR_DST_X);
992 dsty = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Y);
993 dstz = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Z);
994 src_handle = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_RES_HANDLE);
995 src_level = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_LEVEL);
996 box.x = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_X);
997 box.y = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Y);
998 box.z = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Z);
999 box.width = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_W);
1000 box.height = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_H);
1001 box.depth = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_D);
1002
1003 vrend_renderer_resource_copy_region(ctx, dst_handle,
1004 dst_level, dstx, dsty, dstz,
1005 src_handle, src_level,
1006 &box);
1007 return 0;
1008 }
1009
1010
1011 static int vrend_decode_blit(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1012 {
1013 struct pipe_blit_info info;
1014 uint32_t dst_handle, src_handle, temp;
1015
1016 if (length != VIRGL_CMD_BLIT_SIZE)
1017 return EINVAL;
1018 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_S0);
1019 info.mask = temp & 0xff;
1020 info.filter = (temp >> 8) & 0x3;
1021 info.scissor_enable = (temp >> 10) & 0x1;
1022 info.render_condition_enable = (temp >> 11) & 0x1;
1023 info.alpha_blend = (temp >> 12) & 0x1;
1024 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MINX_MINY);
1025 info.scissor.minx = temp & 0xffff;
1026 info.scissor.miny = (temp >> 16) & 0xffff;
1027 temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY);
1028 info.scissor.maxx = temp & 0xffff;
1029 info.scissor.maxy = (temp >> 16) & 0xffff;
1030 dst_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_RES_HANDLE);
1031 info.dst.level = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_LEVEL);
1032 info.dst.format = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_FORMAT);
1033 info.dst.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_X);
1034 info.dst.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Y);
1035 info.dst.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Z);
1036 info.dst.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_W);
1037 info.dst.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_H);
1038 info.dst.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_D);
1039
1040 src_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_RES_HANDLE);
1041 info.src.level = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_LEVEL);
1042 info.src.format = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_FORMAT);
1043 info.src.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_X);
1044 info.src.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Y);
1045 info.src.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Z);
1046 info.src.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_W);
1047 info.src.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_H);
1048 info.src.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_D);
1049
1050 vrend_renderer_blit(ctx, dst_handle, src_handle, &info);
1051 return 0;
1052 }
1053
1054 static int vrend_decode_bind_sampler_states(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1055 {
1056 if (length < 2)
1057 return EINVAL;
1058
1059 uint32_t shader_type = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE);
1060 uint32_t start_slot = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_START_SLOT);
1061 uint32_t num_states = length - 2;
1062
1063 if (shader_type >= PIPE_SHADER_TYPES)
1064 return EINVAL;
1065
1066 vrend_bind_sampler_states(ctx, shader_type, start_slot, num_states,
1067 get_buf_ptr(buf, VIRGL_BIND_SAMPLER_STATES_S0_HANDLE));
1068 return 0;
1069 }
1070
1071 static int vrend_decode_begin_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1072 {
1073 if (length != 1)
1074 return EINVAL;
1075
1076 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_BEGIN_HANDLE);
1077
1078 return vrend_begin_query(ctx, handle);
1079 }
1080
1081 static int vrend_decode_end_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1082 {
1083 if (length != 1)
1084 return EINVAL;
1085
1086 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_END_HANDLE);
1087
1088 return vrend_end_query(ctx, handle);
1089 }
1090
1091 static int vrend_decode_get_query_result(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1092 {
1093 if (length != 2)
1094 return EINVAL;
1095
1096 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_HANDLE);
1097 uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_WAIT);
1098
1099 vrend_get_query_result(ctx, handle, wait);
1100 return 0;
1101 }
1102
1103 static int vrend_decode_get_query_result_qbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1104 {
1105 if (length != VIRGL_QUERY_RESULT_QBO_SIZE)
1106 return EINVAL;
1107
1108 uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_HANDLE);
1109 uint32_t qbo_handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_QBO_HANDLE);
1110 uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_WAIT);
1111 uint32_t result_type = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_RESULT_TYPE);
1112 uint32_t offset = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_OFFSET);
1113 int32_t index = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_INDEX);
1114
1115 vrend_get_query_result_qbo(ctx, handle, qbo_handle, wait, result_type, offset, index);
1116 return 0;
1117 }
1118
1119 static int vrend_decode_set_render_condition(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1120 {
1121 if (length != VIRGL_RENDER_CONDITION_SIZE)
1122 return EINVAL;
1123
1124 uint32_t handle = get_buf_entry(buf, VIRGL_RENDER_CONDITION_HANDLE);
1125 bool condition = get_buf_entry(buf, VIRGL_RENDER_CONDITION_CONDITION) & 1;
1126 uint mode = get_buf_entry(buf, VIRGL_RENDER_CONDITION_MODE);
1127
1128 vrend_render_condition(ctx, handle, condition, mode);
1129 return 0;
1130 }
1131
1132 static int vrend_decode_set_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1133 {
1134 if (length != 1)
1135 return EINVAL;
1136
1137 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1138
1139 vrend_renderer_set_sub_ctx(ctx, ctx_sub_id);
1140 return 0;
1141 }
1142
1143 static int vrend_decode_create_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1144 {
1145 if (length != 1)
1146 return EINVAL;
1147
1148 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1149
1150 vrend_renderer_create_sub_ctx(ctx, ctx_sub_id);
1151 return 0;
1152 }
1153
1154 static int vrend_decode_destroy_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1155 {
1156 if (length != 1)
1157 return EINVAL;
1158
1159 uint32_t ctx_sub_id = get_buf_entry(buf, 1);
1160
1161 vrend_renderer_destroy_sub_ctx(ctx, ctx_sub_id);
1162 return 0;
1163 }
1164
1165 static int vrend_decode_bind_shader(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1166 {
1167 uint32_t handle, type;
1168 if (length != VIRGL_BIND_SHADER_SIZE)
1169 return EINVAL;
1170
1171 handle = get_buf_entry(buf, VIRGL_BIND_SHADER_HANDLE);
1172 type = get_buf_entry(buf, VIRGL_BIND_SHADER_TYPE);
1173
1174 vrend_bind_shader(ctx, handle, type);
1175 return 0;
1176 }
1177
1178 static int vrend_decode_set_tess_state(struct vrend_context *ctx,
1179 const uint32_t *buf, uint32_t length)
1180 {
1181 float tess_factors[6];
1182 int i;
1183
1184 if (length != VIRGL_TESS_STATE_SIZE)
1185 return EINVAL;
1186
1187 for (i = 0; i < 6; i++) {
1188 tess_factors[i] = uif(get_buf_entry(buf, i + 1));
1189 }
1190 vrend_set_tess_state(ctx, tess_factors);
1191 return 0;
1192 }
1193
1194 static int vrend_decode_set_shader_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1195 {
1196 uint32_t num_ssbo;
1197 uint32_t shader_type, start_slot;
1198
1199 if (length < 2)
1200 return EINVAL;
1201
1202 num_ssbo = (length - 2) / VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE;
1203 shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_SHADER_TYPE);
1204 start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_START_SLOT);
1205 if (shader_type >= PIPE_SHADER_TYPES)
1206 return EINVAL;
1207
1208 if (num_ssbo < 1)
1209 return 0;
1210
1211 if (start_slot > PIPE_MAX_SHADER_BUFFERS ||
1212 num_ssbo > PIPE_MAX_SHADER_BUFFERS - start_slot)
1213 return EINVAL;
1214
1215 for (uint32_t i = 0; i < num_ssbo; i++) {
1216 uint32_t offset = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_OFFSET(i));
1217 uint32_t buf_len = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_LENGTH(i));
1218 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_RES_HANDLE(i));
1219 vrend_set_single_ssbo(ctx, shader_type, start_slot + i, offset, buf_len,
1220 handle);
1221 }
1222 return 0;
1223 }
1224
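/*
 * Unlike the SSBO and image decoders, which go through the
 * VIRGL_SET_*_OFFSET/LENGTH/RES_HANDLE macros, the atomic-buffer decoder
 * below indexes the per-buffer fields directly: dwords
 * i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2, + 3 and + 4 hold the
 * offset, length and resource handle for slot start_slot + i.
 */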
1225 static int vrend_decode_set_atomic_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1226 {
1227 uint32_t num_abo;
1228 uint32_t start_slot;
1229
1230 if (length < 2)
1231 return EINVAL;
1232
1233 num_abo = (length - 1) / VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE;
1234 start_slot = get_buf_entry(buf, VIRGL_SET_ATOMIC_BUFFER_START_SLOT);
1235 if (num_abo < 1)
1236 return 0;
1237
1238 if (num_abo > PIPE_MAX_HW_ATOMIC_BUFFERS ||
1239 start_slot > PIPE_MAX_HW_ATOMIC_BUFFERS ||
1240 start_slot > PIPE_MAX_HW_ATOMIC_BUFFERS - num_abo)
1241 return EINVAL;
1242
1243 for (uint32_t i = 0; i < num_abo; i++) {
1244 uint32_t offset = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2);
1245 uint32_t buf_len = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3);
1246 uint32_t handle = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4);
1247 vrend_set_single_abo(ctx, start_slot + i, offset, buf_len, handle);
1248 }
1249
1250 return 0;
1251 }
1252
1253 static int vrend_decode_set_shader_images(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1254 {
1255 uint32_t num_images;
1256 uint32_t shader_type, start_slot;
1257 if (length < 2)
1258 return EINVAL;
1259
1260 num_images = (length - 2) / VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE;
1261 shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_SHADER_TYPE);
1262 start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_START_SLOT);
1263 if (shader_type >= PIPE_SHADER_TYPES)
1264 return EINVAL;
1265
1266 if (num_images < 1) {
1267 return 0;
1268 }
1269 if (start_slot > PIPE_MAX_SHADER_IMAGES ||
1270 start_slot > PIPE_MAX_SHADER_IMAGES - num_images)
1271 return EINVAL;
1272
1273 for (uint32_t i = 0; i < num_images; i++) {
1274 uint32_t format = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_FORMAT(i));
1275 uint32_t access = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_ACCESS(i));
1276 uint32_t layer_offset = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(i));
1277 uint32_t level_size = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(i));
1278 uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_RES_HANDLE(i));
1279 vrend_set_single_image_view(ctx, shader_type, start_slot + i, format, access,
1280 layer_offset, level_size, handle);
1281 }
1282 return 0;
1283 }
1284
1285 static int vrend_decode_memory_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1286 {
1287 if (length != VIRGL_MEMORY_BARRIER_SIZE)
1288 return EINVAL;
1289
1290 unsigned flags = get_buf_entry(buf, VIRGL_MEMORY_BARRIER_FLAGS);
1291 vrend_memory_barrier(ctx, flags);
1292 return 0;
1293 }
1294
1295 static int vrend_decode_launch_grid(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1296 {
1297 uint32_t block[3], grid[3];
1298 uint32_t indirect_handle, indirect_offset;
1299 if (length != VIRGL_LAUNCH_GRID_SIZE)
1300 return EINVAL;
1301
1302 block[0] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_X);
1303 block[1] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Y);
1304 block[2] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Z);
1305 grid[0] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_X);
1306 grid[1] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Y);
1307 grid[2] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Z);
1308 indirect_handle = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_HANDLE);
1309 indirect_offset = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_OFFSET);
1310 vrend_launch_grid(ctx, block, grid, indirect_handle, indirect_offset);
1311 return 0;
1312 }
1313
1314 static int vrend_decode_set_streamout_targets(struct vrend_context *ctx,
1315 const uint32_t *buf, uint32_t length)
1316 {
1317 uint32_t handles[16];
1318 uint32_t num_handles = length - 1;
1319 uint32_t append_bitmask;
1320 uint i;
1321
1322 if (length < 1)
1323 return EINVAL;
1324 if (num_handles > ARRAY_SIZE(handles))
1325 return EINVAL;
1326
1327 append_bitmask = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK);
1328 for (i = 0; i < num_handles; i++)
1329 handles[i] = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_H0 + i);
1330 vrend_set_streamout_targets(ctx, append_bitmask, num_handles, handles);
1331 return 0;
1332 }
1333
1334 static int vrend_decode_texture_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1335 {
1336 if (length != VIRGL_TEXTURE_BARRIER_SIZE)
1337 return EINVAL;
1338
1339 unsigned flags = get_buf_entry(buf, VIRGL_TEXTURE_BARRIER_FLAGS);
1340 vrend_texture_barrier(ctx, flags);
1341 return 0;
1342 }
1343
1344 static int vrend_decode_set_debug_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1345 {
1346 char *flagstring;
1347 int slen = sizeof(uint32_t) * length;
1348
1349 if (length < VIRGL_SET_DEBUG_FLAGS_MIN_SIZE)
1350 return EINVAL;
1351
1352 const uint32_t *flag_buf = get_buf_ptr(buf, VIRGL_SET_DEBUG_FLAGSTRING_OFFSET);
1353 flagstring = malloc(slen+1);
1354
1355 if (!flagstring) {
1356 return ENOMEM;
1357 }
1358
1359 memcpy(flagstring, flag_buf, slen);
1360 flagstring[slen] = 0;
1361 vrend_context_set_debug_flags(ctx, flagstring);
1362
1363 free(flagstring);
1364
1365 return 0;
1366 }
1367
1368 static int vrend_decode_set_tweaks(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1369 {
1370 VREND_DEBUG(dbg_tweak, NULL, "Received TWEAK set command\n");
1371
1372 if (length < VIRGL_SET_TWEAKS_SIZE)
1373 return EINVAL;
1374
1375 uint32_t tweak_id = get_buf_entry(buf, VIRGL_SET_TWEAKS_ID);
1376 uint32_t tweak_value = get_buf_entry(buf, VIRGL_SET_TWEAKS_VALUE);
1377
1378 vrend_set_active_tweaks(vrend_get_context_tweaks(ctx), tweak_id, tweak_value);
1379 return 0;
1380 }
1381
1382
1383 static int vrend_decode_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1384 {
1385 struct pipe_box box;
1386 uint32_t dst_handle;
1387 struct vrend_transfer_info info;
1388
1389 if (length < VIRGL_TRANSFER3D_SIZE)
1390 return EINVAL;
1391
1392 memset(&info, 0, sizeof(info));
1393 info.box = &box;
1394 vrend_decode_transfer_common(buf, &dst_handle, &info);
1395 info.offset = get_buf_entry(buf, VIRGL_TRANSFER3D_DATA_OFFSET);
1396 int transfer_mode = get_buf_entry(buf, VIRGL_TRANSFER3D_DIRECTION);
1397
1398 if (transfer_mode != VIRGL_TRANSFER_TO_HOST &&
1399 transfer_mode != VIRGL_TRANSFER_FROM_HOST)
1400 return EINVAL;
1401
1402 return vrend_renderer_transfer_iov(ctx, dst_handle, &info,
1403 transfer_mode);
1404 }
1405
1406 static int vrend_decode_copy_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1407 {
1408 struct pipe_box box;
1409 struct vrend_transfer_info info;
1410 uint32_t dst_handle;
1411 uint32_t src_handle;
1412
1413 if (length != VIRGL_COPY_TRANSFER3D_SIZE)
1414 return EINVAL;
1415
1416 memset(&info, 0, sizeof(info));
1417 info.box = &box;
1418 vrend_decode_transfer_common(buf, &dst_handle, &info);
1419 info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
1420 info.synchronized = (get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SYNCHRONIZED) != 0);
1421
1422 src_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
1423
1424 return vrend_renderer_copy_transfer3d(ctx, dst_handle, src_handle,
1425 &info);
1426 }
1427
1428 static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1429 {
1430 struct vrend_renderer_resource_create_args args = { 0 };
1431 uint32_t blob_id;
1432
1433 if (length != VIRGL_PIPE_RES_CREATE_SIZE)
1434 return EINVAL;
1435
1436 args.target = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_TARGET);
1437 args.format = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FORMAT);
1438 args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BIND);
1439 args.width = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_WIDTH);
1440 args.height = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_HEIGHT);
1441 args.depth = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_DEPTH);
1442 args.array_size = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_ARRAY_SIZE);
1443 args.last_level = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_LAST_LEVEL);
1444 args.nr_samples = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_NR_SAMPLES);
1445 args.flags = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FLAGS);
1446 blob_id = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BLOB_ID);
1447
1448 return vrend_renderer_pipe_resource_create(ctx, blob_id, &args);
1449 }
1450
1451 static int vrend_decode_pipe_resource_set_type(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
1452 {
1453 struct vrend_renderer_resource_set_type_args args = { 0 };
1454 uint32_t res_id;
1455
1456 if (length >= VIRGL_PIPE_RES_SET_TYPE_SIZE(0))
1457 args.plane_count = (length - VIRGL_PIPE_RES_SET_TYPE_SIZE(0)) / 2;
1458
1459 if (length != VIRGL_PIPE_RES_SET_TYPE_SIZE(args.plane_count) ||
1460 !args.plane_count || args.plane_count > VIRGL_GBM_MAX_PLANES)
1461 return EINVAL;
1462
1463 res_id = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE);
1464 args.format = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_FORMAT);
1465 args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_BIND);
1466 args.width = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_WIDTH);
1467 args.height = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_HEIGHT);
1468 args.usage = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_USAGE);
1469 args.modifier = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO);
1470 args.modifier |= (uint64_t)get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI) << 32;
1471 for (uint32_t i = 0; i < args.plane_count; i++) {
1472 args.plane_strides[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i));
1473 args.plane_offsets[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i));
1474 }
1475
1476 return vrend_renderer_pipe_resource_set_type(ctx, res_id, &args);
1477 }
1478
1479 static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
1480 uint32_t ctx_id);
1481
vrend_decode_ctx_fence_retire(void * fence_cookie,void * retire_data)1482 static void vrend_decode_ctx_fence_retire(void *fence_cookie,
1483 void *retire_data)
1484 {
1485 struct vrend_decode_ctx *dctx = retire_data;
1486 dctx->base.fence_retire(&dctx->base, 0, fence_cookie);
1487 }
1488
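/* Create a decoder context for the frontend: allocate the
 * vrend_decode_ctx wrapper, fill in its virgl_context vtable, create the
 * underlying vrend_context, and register the fence-retire callback so
 * completed fences are reported back through the base context. */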
struct virgl_context *vrend_renderer_context_create(uint32_t handle,
                                                    uint32_t nlen,
                                                    const char *debug_name)
{
   struct vrend_decode_ctx *dctx;

   dctx = malloc(sizeof(struct vrend_decode_ctx));
   if (!dctx)
      return NULL;

   vrend_decode_ctx_init_base(dctx, handle);

   dctx->grctx = vrend_create_context(handle, nlen, debug_name);
   if (!dctx->grctx) {
      free(dctx);
      return NULL;
   }

   vrend_renderer_set_fence_retire(dctx->grctx,
                                   vrend_decode_ctx_fence_retire,
                                   dctx);

   return &dctx->base;
}

static void vrend_decode_ctx_destroy(struct virgl_context *ctx)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   vrend_destroy_context(dctx->grctx);
   free(dctx);
}

static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
                                             struct virgl_resource *res)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   vrend_renderer_attach_res_ctx(dctx->grctx, res);
}

static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
                                             struct virgl_resource *res)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   vrend_renderer_detach_res_ctx(dctx->grctx, res);
}

static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
                                        struct virgl_resource *res,
                                        const struct vrend_transfer_info *info,
                                        int transfer_mode)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
   return vrend_renderer_transfer_iov(dctx->grctx, res->res_id, info,
                                      transfer_mode);
}

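/* Resolve a blob id (previously set up via PIPE_RESOURCE_CREATE) into a
 * pipe resource for the frontend. The lookup consumes the blob id, so it
 * cannot be resolved a second time. */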
static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
                                     uint64_t blob_id,
                                     UNUSED uint32_t blob_flags,
                                     struct virgl_context_blob *blob)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   blob->type = VIRGL_RESOURCE_FD_INVALID;
   /* this transfers ownership and blob_id is no longer valid */
   blob->u.pipe_resource = vrend_get_blob_pipe(dctx->grctx, blob_id);
   if (!blob->u.pipe_resource)
      return -EINVAL;

   blob->map_info = vrend_renderer_resource_get_map_info(blob->u.pipe_resource);
   return 0;
}

static int vrend_decode_get_memory_info(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
   TRACE_FUNC();
   if (length != 1)
      return EINVAL;

   uint32_t res_handle = get_buf_entry(buf, 1);

   vrend_renderer_get_meminfo(ctx, res_handle);

   return 0;
}

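/* Decode a SEND_STRING_MARKER command: check that the advertised string
 * length fits inside the command payload before passing the marker text
 * to the context (typically forwarded to the GL debug output). */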
static int vrend_decode_send_string_marker(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
   uint32_t buf_len = sizeof(uint32_t) * (length - 1);

   if (length < VIRGL_SEND_STRING_MARKER_MIN_SIZE) {
      fprintf(stderr, "SEND_STRING_MARKER command shorter than the minimum length\n");
      return EINVAL;
   }

   uint32_t str_len = get_buf_entry(buf, VIRGL_SEND_STRING_MARKER_STRING_SIZE);
   if (str_len > buf_len) {
      fprintf(stderr, "String len %u > buf_len %u\n", str_len, buf_len);
      return EINVAL;
   }

   vrend_context_emit_string_marker(ctx, str_len, get_buf_ptr(buf, VIRGL_SEND_STRING_MARKER_OFFSET));

   return 0;
}

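/* Every command decoder has the same shape: it gets the vrend context, a
 * pointer to the command's header dword (payload entries start at offset
 * 1), and the payload length in dwords, and returns 0 or an errno-style
 * error code. */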
typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);

static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
   (void)ctx;
   (void)buf;
   (void)length;
   return 0;
}

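/* Dispatch table mapping VIRGL_CCMD_* ids to their decoders. Commands
 * that need no host-side work (NOP, END_TRANSFERS) go to
 * vrend_decode_dummy. */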
static const vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
   [VIRGL_CCMD_NOP] = vrend_decode_dummy,
   [VIRGL_CCMD_CREATE_OBJECT] = vrend_decode_create_object,
   [VIRGL_CCMD_BIND_OBJECT] = vrend_decode_bind_object,
   [VIRGL_CCMD_DESTROY_OBJECT] = vrend_decode_destroy_object,
   [VIRGL_CCMD_CLEAR] = vrend_decode_clear,
   [VIRGL_CCMD_CLEAR_TEXTURE] = vrend_decode_clear_texture,
   [VIRGL_CCMD_DRAW_VBO] = vrend_decode_draw_vbo,
   [VIRGL_CCMD_SET_FRAMEBUFFER_STATE] = vrend_decode_set_framebuffer_state,
   [VIRGL_CCMD_SET_VERTEX_BUFFERS] = vrend_decode_set_vertex_buffers,
   [VIRGL_CCMD_RESOURCE_INLINE_WRITE] = vrend_decode_resource_inline_write,
   [VIRGL_CCMD_SET_VIEWPORT_STATE] = vrend_decode_set_viewport_state,
   [VIRGL_CCMD_SET_SAMPLER_VIEWS] = vrend_decode_set_sampler_views,
   [VIRGL_CCMD_SET_INDEX_BUFFER] = vrend_decode_set_index_buffer,
   [VIRGL_CCMD_SET_CONSTANT_BUFFER] = vrend_decode_set_constant_buffer,
   [VIRGL_CCMD_SET_STENCIL_REF] = vrend_decode_set_stencil_ref,
   [VIRGL_CCMD_SET_BLEND_COLOR] = vrend_decode_set_blend_color,
   [VIRGL_CCMD_SET_SCISSOR_STATE] = vrend_decode_set_scissor_state,
   [VIRGL_CCMD_BLIT] = vrend_decode_blit,
   [VIRGL_CCMD_RESOURCE_COPY_REGION] = vrend_decode_resource_copy_region,
   [VIRGL_CCMD_BIND_SAMPLER_STATES] = vrend_decode_bind_sampler_states,
   [VIRGL_CCMD_BEGIN_QUERY] = vrend_decode_begin_query,
   [VIRGL_CCMD_END_QUERY] = vrend_decode_end_query,
   [VIRGL_CCMD_GET_QUERY_RESULT] = vrend_decode_get_query_result,
   [VIRGL_CCMD_SET_POLYGON_STIPPLE] = vrend_decode_set_polygon_stipple,
   [VIRGL_CCMD_SET_CLIP_STATE] = vrend_decode_set_clip_state,
   [VIRGL_CCMD_SET_SAMPLE_MASK] = vrend_decode_set_sample_mask,
   [VIRGL_CCMD_SET_MIN_SAMPLES] = vrend_decode_set_min_samples,
   [VIRGL_CCMD_SET_STREAMOUT_TARGETS] = vrend_decode_set_streamout_targets,
   [VIRGL_CCMD_SET_RENDER_CONDITION] = vrend_decode_set_render_condition,
   [VIRGL_CCMD_SET_UNIFORM_BUFFER] = vrend_decode_set_uniform_buffer,
   [VIRGL_CCMD_SET_SUB_CTX] = vrend_decode_set_sub_ctx,
   [VIRGL_CCMD_CREATE_SUB_CTX] = vrend_decode_create_sub_ctx,
   [VIRGL_CCMD_DESTROY_SUB_CTX] = vrend_decode_destroy_sub_ctx,
   [VIRGL_CCMD_BIND_SHADER] = vrend_decode_bind_shader,
   [VIRGL_CCMD_SET_TESS_STATE] = vrend_decode_set_tess_state,
   [VIRGL_CCMD_SET_SHADER_BUFFERS] = vrend_decode_set_shader_buffers,
   [VIRGL_CCMD_SET_SHADER_IMAGES] = vrend_decode_set_shader_images,
   [VIRGL_CCMD_SET_ATOMIC_BUFFERS] = vrend_decode_set_atomic_buffers,
   [VIRGL_CCMD_MEMORY_BARRIER] = vrend_decode_memory_barrier,
   [VIRGL_CCMD_LAUNCH_GRID] = vrend_decode_launch_grid,
   [VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH] = vrend_decode_set_framebuffer_state_no_attach,
   [VIRGL_CCMD_TEXTURE_BARRIER] = vrend_decode_texture_barrier,
   [VIRGL_CCMD_SET_DEBUG_FLAGS] = vrend_decode_set_debug_mask,
   [VIRGL_CCMD_GET_QUERY_RESULT_QBO] = vrend_decode_get_query_result_qbo,
   [VIRGL_CCMD_TRANSFER3D] = vrend_decode_transfer3d,
   [VIRGL_CCMD_COPY_TRANSFER3D] = vrend_decode_copy_transfer3d,
   [VIRGL_CCMD_END_TRANSFERS] = vrend_decode_dummy,
   [VIRGL_CCMD_SET_TWEAKS] = vrend_decode_set_tweaks,
   [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create,
   [VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE] = vrend_decode_pipe_resource_set_type,
   [VIRGL_CCMD_GET_MEMORY_INFO] = vrend_decode_get_memory_info,
   [VIRGL_CCMD_SEND_STRING_MARKER] = vrend_decode_send_string_marker,
};

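/* Decode and execute a submitted command buffer. Only two fields of each
 * command's header dword are used here:
 *
 *   bits 31..16  payload length in dwords (not counting the header)
 *   bits  7..0   command id (VIRGL_CCMD_*)
 *
 * The middle bits are ignored by the dispatcher; in the virgl protocol
 * they carry the object type for object commands. Each command is bounds
 * checked against the buffer before being dispatched via decode_table. */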
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
                                       const void *buffer,
                                       size_t size)
{
   TRACE_FUNC();
   struct vrend_decode_ctx *gdctx = (struct vrend_decode_ctx *)ctx;
   bool bret;
   int ret;

   bret = vrend_hw_switch_context(gdctx->grctx, true);
   if (bret == false)
      return EINVAL;

   const uint32_t *typed_buf = (const uint32_t *)buffer;
   const uint32_t buf_total = (uint32_t)(size / sizeof(uint32_t));
   uint32_t buf_offset = 0;

   while (buf_offset < buf_total) {
#ifndef NDEBUG
      const uint32_t cur_offset = buf_offset;
#endif

      const uint32_t *buf = &typed_buf[buf_offset];
      uint32_t len = *buf >> 16;
      uint32_t cmd = *buf & 0xff;

      if (cmd >= VIRGL_MAX_COMMANDS)
         return EINVAL;

      buf_offset += len + 1;

      ret = 0;
      /* check if the guest is doing something bad */
      if (buf_offset > buf_total) {
         vrend_report_buffer_error(gdctx->grctx, 0);
         break;
      }

      VREND_DEBUG(dbg_cmd, gdctx->grctx, "%-4d %-20s len:%d\n",
                  cur_offset, vrend_get_comand_name(cmd), len);

      TRACE_SCOPE_SLOW(vrend_get_comand_name(cmd));

      ret = decode_table[cmd](gdctx->grctx, buf, len);
      if (ret) {
         if (ret == EINVAL)
            vrend_report_buffer_error(gdctx->grctx, *buf);
         return ret;
      }
   }
   return 0;
}

static int vrend_decode_ctx_get_fencing_fd(UNUSED struct virgl_context *ctx)
{
   return vrend_renderer_get_poll_fd();
}

static void vrend_decode_ctx_retire_fences(UNUSED struct virgl_context *ctx)
{
   vrend_renderer_check_fences();
}

static int vrend_decode_ctx_submit_fence(struct virgl_context *ctx,
                                         uint32_t flags,
                                         uint64_t queue_id,
                                         void *fence_cookie)
{
   struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;

   if (queue_id)
      return -EINVAL;

   return vrend_renderer_create_fence(dctx->grctx, flags, fence_cookie);
}

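/* Fill in the virgl_context function table for a decoder context. The
 * debug-only loop asserts that every slot of decode_table is populated,
 * so a missing decoder is caught at context creation rather than on
 * first dispatch. */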
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
                                       uint32_t ctx_id)
{
   struct virgl_context *ctx = &dctx->base;

   for (unsigned i = 0; i < VIRGL_MAX_COMMANDS; ++i)
      assert(decode_table[i]);

   ctx->ctx_id = ctx_id;
   ctx->destroy = vrend_decode_ctx_destroy;
   ctx->attach_resource = vrend_decode_ctx_attach_resource;
   ctx->detach_resource = vrend_decode_ctx_detach_resource;
   ctx->transfer_3d = vrend_decode_ctx_transfer_3d;
   ctx->get_blob = vrend_decode_ctx_get_blob;
   ctx->get_blob_done = NULL;
   ctx->submit_cmd = vrend_decode_ctx_submit_cmd;

   ctx->get_fencing_fd = vrend_decode_ctx_get_fencing_fd;
   ctx->retire_fences = vrend_decode_ctx_retire_fences;
   ctx->submit_fence = vrend_decode_ctx_submit_fence;
}