/**************************************************************************
 *
 * Copyright (C) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_dual_blend.h"

#include "os/os_thread.h"
#include "util/u_double_list.h"
#include "util/u_format.h"
#include "tgsi/tgsi_parse.h"

#include "vrend_object.h"
#include "vrend_shader.h"

#include "vrend_renderer.h"

#include "virgl_hw.h"

#include "tgsi/tgsi_text.h"

#ifdef HAVE_EVENTFD
#include <sys/eventfd.h>
#endif

/* debugging aid to dump shaders */
int vrend_dump_shaders;

/* debugging via KHR_debug extension */
int vrend_use_debug_cb = 0;

struct vrend_if_cbs *vrend_clicbs;

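/* A pending fence: fence_id is the guest-visible sequence number and
 * syncobj the GL sync object created for it.  Fences sit on the
 * renderer's global fence list until their sync object signals. */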
struct vrend_fence {
   uint32_t fence_id;
   uint32_t ctx_id;
   GLsync syncobj;
   struct list_head fences;
};

struct vrend_query {
   struct list_head waiting_queries;

   GLuint id;
   GLuint type;
   GLuint index;
   GLuint gltype;
   int ctx_id;
   struct vrend_resource *res;
   uint64_t current_total;
};

struct global_error_state {
   enum virgl_errors last_error;
};

enum features_id
{
   feat_arb_or_gles_ext_texture_buffer,
   feat_arb_robustness,
   feat_base_instance,
   feat_barrier,
   feat_bit_encoding,
   feat_compute_shader,
   feat_copy_image,
   feat_conditional_render_inverted,
   feat_cube_map_array,
   feat_debug_cb,
   feat_draw_instance,
   feat_dual_src_blend,
   feat_fb_no_attach,
   feat_framebuffer_fetch,
   feat_geometry_shader,
   feat_gl_conditional_render,
   feat_gl_prim_restart,
   feat_gles_khr_robustness,
   feat_gles31_vertex_attrib_binding,
   feat_images,
   feat_indep_blend,
   feat_indep_blend_func,
   feat_indirect_draw,
   feat_mesa_invert,
   feat_ms_scaled_blit,
   feat_multisample,
   feat_nv_conditional_render,
   feat_nv_prim_restart,
   feat_polygon_offset_clamp,
   feat_robust_buffer_access,
   feat_sample_mask,
   feat_sample_shading,
   feat_samplers,
   feat_shader_clock,
   feat_ssbo,
   feat_ssbo_barrier,
   feat_stencil_texturing,
   feat_storage_multisample,
   feat_tessellation,
   feat_texture_array,
   feat_texture_barrier,
   feat_texture_buffer_range,
   feat_texture_gather,
   feat_texture_multisample,
   feat_texture_srgb_decode,
   feat_texture_storage,
   feat_texture_view,
   feat_transform_feedback,
   feat_transform_feedback2,
   feat_transform_feedback3,
   feat_transform_feedback_overflow_query,
   feat_txqs,
   feat_ubo,
   feat_viewport_array,
   feat_last,
};

#define FEAT_MAX_EXTS 4
#define UNAVAIL INT_MAX

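/* Table driving feature detection: a feature is available once the host
 * context version reaches gl_ver (GL) or gles_ver (GLES), or when one of
 * the listed extensions is exposed.  UNAVAIL (INT_MAX) means the version
 * check can never succeed, so only the extensions matter; unused gl_ext
 * slots stay zero-initialized and terminate the scan in init_features(). */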
static const struct {
   int gl_ver;
   int gles_ver;
   const char *gl_ext[FEAT_MAX_EXTS];
} feature_list[] = {
   [feat_arb_or_gles_ext_texture_buffer] = { 31, UNAVAIL, { "GL_ARB_texture_buffer_object", "GL_EXT_texture_buffer", NULL } },
   [feat_arb_robustness] = { UNAVAIL, UNAVAIL, { "GL_ARB_robustness" } },
   [feat_base_instance] = { 42, UNAVAIL, { "GL_ARB_base_instance", "GL_EXT_base_instance" } },
   [feat_barrier] = { 42, 31, {} },
   [feat_bit_encoding] = { 33, UNAVAIL, { "GL_ARB_shader_bit_encoding" } },
   [feat_compute_shader] = { 43, 31, { "GL_ARB_compute_shader" } },
   [feat_copy_image] = { 43, 32, { "GL_ARB_copy_image", "GL_EXT_copy_image", "GL_OES_copy_image" } },
   [feat_conditional_render_inverted] = { 45, UNAVAIL, { "GL_ARB_conditional_render_inverted" } },
   [feat_cube_map_array] = { 40, UNAVAIL, { "GL_ARB_texture_cube_map_array", "GL_EXT_texture_cube_map_array", "GL_OES_texture_cube_map_array" } },
   [feat_debug_cb] = { UNAVAIL, UNAVAIL, {} }, /* special case */
   [feat_draw_instance] = { 31, 30, { "GL_ARB_draw_instanced" } },
   [feat_dual_src_blend] = { 33, UNAVAIL, { "GL_ARB_blend_func_extended" } },
   [feat_fb_no_attach] = { 43, 31, { "GL_ARB_framebuffer_no_attachments" } },
   [feat_framebuffer_fetch] = { UNAVAIL, UNAVAIL, { "GL_EXT_shader_framebuffer_fetch" } },
   [feat_geometry_shader] = { 32, 32, { "GL_EXT_geometry_shader", "GL_OES_geometry_shader" } },
   [feat_gl_conditional_render] = { 30, UNAVAIL, {} },
   [feat_gl_prim_restart] = { 31, 30, {} },
   [feat_gles_khr_robustness] = { UNAVAIL, UNAVAIL, { "GL_KHR_robustness" } },
   [feat_gles31_vertex_attrib_binding] = { 43, 31, { "GL_ARB_vertex_attrib_binding" } },
   [feat_images] = { 42, 31, { "GL_ARB_shader_image_load_store" } },
   [feat_indep_blend] = { 30, UNAVAIL, { "GL_EXT_draw_buffers2" } },
   [feat_indep_blend_func] = { 40, UNAVAIL, { "GL_ARB_draw_buffers_blend" } },
   [feat_indirect_draw] = { 40, 31, { "GL_ARB_draw_indirect" } },
   [feat_mesa_invert] = { UNAVAIL, UNAVAIL, { "GL_MESA_pack_invert" } },
   [feat_ms_scaled_blit] = { UNAVAIL, UNAVAIL, { "GL_EXT_framebuffer_multisample_blit_scaled" } },
   [feat_multisample] = { 32, 30, { "GL_ARB_texture_multisample" } },
   [feat_nv_conditional_render] = { UNAVAIL, UNAVAIL, { "GL_NV_conditional_render" } },
   [feat_nv_prim_restart] = { UNAVAIL, UNAVAIL, { "GL_NV_primitive_restart" } },
   [feat_polygon_offset_clamp] = { 46, UNAVAIL, { "GL_ARB_polygon_offset_clamp" } },
   [feat_robust_buffer_access] = { 43, UNAVAIL, { "GL_ARB_robust_buffer_access_behavior" } },
   [feat_sample_mask] = { 32, 31, { "GL_ARB_texture_multisample" } },
   [feat_sample_shading] = { 40, UNAVAIL, { "GL_ARB_sample_shading" } },
   [feat_samplers] = { 33, 30, { "GL_ARB_sampler_objects" } },
   [feat_shader_clock] = { UNAVAIL, UNAVAIL, { "GL_ARB_shader_clock" } },
   [feat_ssbo] = { 43, 31, { "GL_ARB_shader_storage_buffer_object" } },
   [feat_ssbo_barrier] = { 43, 31, {} },
   [feat_stencil_texturing] = { 43, 31, { "GL_ARB_stencil_texturing" } },
   [feat_storage_multisample] = { 43, 31, { "GL_ARB_texture_storage_multisample" } },
   [feat_tessellation] = { 40, UNAVAIL, { "GL_ARB_tessellation_shader" } },
   [feat_texture_array] = { 30, 30, { "GL_EXT_texture_array" } },
   [feat_texture_barrier] = { 45, UNAVAIL, { "GL_ARB_texture_barrier" } },
   [feat_texture_buffer_range] = { 43, UNAVAIL, { "GL_ARB_texture_buffer_range" } },
   [feat_texture_gather] = { 40, 31, { "GL_ARB_texture_gather" } },
   [feat_texture_multisample] = { 32, 30, { "GL_ARB_texture_multisample" } },
   [feat_texture_srgb_decode] = { UNAVAIL, UNAVAIL, { "GL_EXT_texture_sRGB_decode" } },
   [feat_texture_storage] = { 42, 30, { "GL_ARB_texture_storage" } },
   [feat_texture_view] = { 43, UNAVAIL, { "GL_ARB_texture_view" } },
   [feat_transform_feedback] = { 30, 30, { "GL_EXT_transform_feedback" } },
   [feat_transform_feedback2] = { 40, 30, { "GL_ARB_transform_feedback2" } },
   [feat_transform_feedback3] = { 40, UNAVAIL, { "GL_ARB_transform_feedback3" } },
   [feat_transform_feedback_overflow_query] = { 46, UNAVAIL, { "GL_ARB_transform_feedback_overflow_query" } },
   [feat_txqs] = { 45, UNAVAIL, { "GL_ARB_shader_texture_image_samples" } },
   [feat_ubo] = { 31, 30, { "GL_ARB_uniform_buffer_object" } },
   [feat_viewport_array] = { 41, UNAVAIL, { "GL_ARB_viewport_array" } },
};

struct global_renderer_state {
   int gl_major_ver;
   int gl_minor_ver;

   struct vrend_context *current_ctx;
   struct vrend_context *current_hw_ctx;
   struct list_head waiting_query_list;

   bool inited;
   bool use_gles;
   bool use_core_profile;

   bool features[feat_last];

   /* these appeared broken on at least one driver */
   bool use_explicit_locations;
   uint32_t max_uniform_blocks;
   uint32_t max_draw_buffers;
   struct list_head active_ctx_list;

   /* threaded sync */
   bool stop_sync_thread;
   int eventfd;

   pipe_mutex fence_mutex;
   struct list_head fence_list;
   struct list_head fence_wait_list;
   pipe_condvar fence_cond;

   pipe_thread sync_thread;
   virgl_gl_context sync_context;
};

static struct global_renderer_state vrend_state;

static inline bool has_feature(enum features_id feature_id)
{
   return vrend_state.features[feature_id];
}

static inline void set_feature(enum features_id feature_id)
{
   vrend_state.features[feature_id] = true;
}

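/* A linked GL program for one combination of stage shaders, cached on the
 * sub-context's program list together with the uniform, sampler, UBO,
 * SSBO and image locations resolved at link time. */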
struct vrend_linked_shader_program {
   struct list_head head;
   struct list_head sl[PIPE_SHADER_TYPES];
   GLuint id;

   bool dual_src_linked;
   struct vrend_shader *ss[PIPE_SHADER_TYPES];

   uint32_t samplers_used_mask[PIPE_SHADER_TYPES];
   GLuint *samp_locs[PIPE_SHADER_TYPES];

   GLuint *shadow_samp_mask_locs[PIPE_SHADER_TYPES];
   GLuint *shadow_samp_add_locs[PIPE_SHADER_TYPES];

   GLint *const_locs[PIPE_SHADER_TYPES];

   GLuint *attrib_locs;
   uint32_t shadow_samp_mask[PIPE_SHADER_TYPES];

   GLuint *ubo_locs[PIPE_SHADER_TYPES];
   GLuint vs_ws_adjust_loc;

   GLint fs_stipple_loc;

   GLuint clip_locs[8];

   uint32_t images_used_mask[PIPE_SHADER_TYPES];
   GLint *img_locs[PIPE_SHADER_TYPES];

   uint32_t ssbo_used_mask[PIPE_SHADER_TYPES];
   GLuint *ssbo_locs[PIPE_SHADER_TYPES];
};

struct vrend_shader {
   struct vrend_shader *next_variant;
   struct vrend_shader_selector *sel;

   GLchar *glsl_prog;
   GLuint id;
   GLuint compiled_fs_id;
   struct vrend_shader_key key;
   struct list_head programs;
};

struct vrend_shader_selector {
   struct pipe_reference reference;

   unsigned num_shaders;
   unsigned type;
   struct vrend_shader_info sinfo;

   struct vrend_shader *current;
   struct tgsi_token *tokens;

   uint32_t req_local_mem;
   char *tmp_buf;
   uint32_t buf_len;
   uint32_t buf_offset;
};

struct vrend_texture {
   struct vrend_resource base;
   struct pipe_sampler_state state;
};

struct vrend_surface {
   struct pipe_reference reference;
   GLuint id;
   GLuint res_handle;
   GLuint format;
   GLuint val0, val1;
   struct vrend_resource *texture;
};

struct vrend_sampler_state {
   struct pipe_sampler_state base;
   GLuint id;
};

struct vrend_so_target {
   struct pipe_reference reference;
   GLuint res_handle;
   unsigned buffer_offset;
   unsigned buffer_size;
   struct vrend_resource *buffer;
   struct vrend_sub_context *sub_ctx;
};

struct vrend_sampler_view {
   struct pipe_reference reference;
   GLuint id;
   GLuint format;
   GLenum target;
   GLuint val0, val1;
   GLuint gl_swizzle_r;
   GLuint gl_swizzle_g;
   GLuint gl_swizzle_b;
   GLuint gl_swizzle_a;
   GLenum cur_swizzle_r;
   GLenum cur_swizzle_g;
   GLenum cur_swizzle_b;
   GLenum cur_swizzle_a;
   GLuint cur_base, cur_max;
   GLenum depth_texture_mode;
   GLuint srgb_decode;
   GLuint cur_srgb_decode;
   struct vrend_resource *texture;
};

struct vrend_image_view {
   GLuint id;
   GLenum access;
   GLenum format;
   union {
      struct {
         unsigned first_layer:16;     /**< first layer to use for array textures */
         unsigned last_layer:16;      /**< last layer to use for array textures */
         unsigned level:8;            /**< mipmap level to use */
      } tex;
      struct {
         unsigned offset;   /**< offset in bytes */
         unsigned size;     /**< size of the accessible sub-range in bytes */
      } buf;
   } u;
   struct vrend_resource *texture;
};

struct vrend_ssbo {
   struct vrend_resource *res;
   unsigned buffer_size;
   unsigned buffer_offset;
};

struct vrend_vertex_element {
   struct pipe_vertex_element base;
   GLenum type;
   GLboolean norm;
   GLuint nr_chan;
};

struct vrend_vertex_element_array {
   unsigned count;
   struct vrend_vertex_element elements[PIPE_MAX_ATTRIBS];
   GLuint id;
};

struct vrend_constants {
   unsigned int *consts;
   uint32_t num_consts;
};

struct vrend_shader_view {
   int num_views;
   struct vrend_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   uint32_t res_id[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   uint32_t old_ids[PIPE_MAX_SHADER_SAMPLER_VIEWS];
};

struct vrend_viewport {
   GLint cur_x, cur_y;
   GLsizei width, height;
   GLclampd near_val, far_val;
};

/* create a streamout object to support pause/resume */
struct vrend_streamout_object {
   GLuint id;
   uint32_t num_targets;
   uint32_t handles[16];
   struct list_head head;
   int xfb_state;
   struct vrend_so_target *so_targets[16];
};

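/* Transform feedback state of a streamout object: not recording, started
 * on the guest side with glBeginTransformFeedback() still pending,
 * actively recording, or paused. */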
#define XFB_STATE_OFF 0
#define XFB_STATE_STARTED_NEED_BEGIN 1
#define XFB_STATE_STARTED 2
#define XFB_STATE_PAUSED 3

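/* Per sub-context state.  Each guest context owns one or more
 * sub-contexts, each with its own GL context, object hash and shadow
 * copies of the gallium state (shaders, vertex buffers, framebuffer,
 * blend/depth/rasterizer state) used to skip redundant GL calls. */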
struct vrend_sub_context {
   struct list_head head;

   virgl_gl_context gl_context;

   int sub_ctx_id;

   GLuint vaoid;
   uint32_t enabled_attribs_bitmask;

   struct list_head programs;
   struct util_hash_table *object_hash;

   struct vrend_vertex_element_array *ve;
   int num_vbos;
   int old_num_vbos; /* for cleaning up */
   struct pipe_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
   uint32_t vbo_res_ids[PIPE_MAX_ATTRIBS];

   struct pipe_index_buffer ib;
   uint32_t index_buffer_res_id;

   bool vbo_dirty;
   bool shader_dirty;
   bool cs_shader_dirty;
   bool sampler_state_dirty;
   bool stencil_state_dirty;
   bool image_state_dirty;

   uint32_t long_shader_in_progress_handle[PIPE_SHADER_TYPES];
   struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES];
   struct vrend_linked_shader_program *prog;

   int prog_ids[PIPE_SHADER_TYPES];
   struct vrend_shader_view views[PIPE_SHADER_TYPES];

   struct vrend_constants consts[PIPE_SHADER_TYPES];
   bool const_dirty[PIPE_SHADER_TYPES];
   struct vrend_sampler_state *sampler_state[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];

   struct pipe_constant_buffer cbs[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
   uint32_t const_bufs_used_mask[PIPE_SHADER_TYPES];

   int num_sampler_states[PIPE_SHADER_TYPES];

   uint32_t fb_id;
   int nr_cbufs, old_nr_cbufs;
   struct vrend_surface *zsurf;
   struct vrend_surface *surf[PIPE_MAX_COLOR_BUFS];

   struct vrend_viewport vps[PIPE_MAX_VIEWPORTS];
   float depth_transform, depth_scale;
   /* viewport is negative */
   uint32_t scissor_state_dirty;
   uint32_t viewport_state_dirty;

   uint32_t fb_height;

   struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS];

   struct pipe_blend_state blend_state;
   struct pipe_depth_stencil_alpha_state dsa_state;
   struct pipe_rasterizer_state rs_state;

   uint8_t stencil_refs[2];
   bool viewport_is_negative;
   /* this is set if the contents of the FBO look upside down when viewed
      with 0,0 as the bottom corner */
   bool inverted_fbo_content;

   GLuint blit_fb_ids[2];

   struct pipe_depth_stencil_alpha_state *dsa;

   struct pipe_clip_state ucp_state;

   bool blend_enabled;
   bool depth_test_enabled;
   bool alpha_test_enabled;
   bool stencil_test_enabled;

   GLuint program_id;
   int last_shader_idx;

   struct pipe_rasterizer_state hw_rs_state;
   struct pipe_blend_state hw_blend_state;

   struct list_head streamout_list;
   struct vrend_streamout_object *current_so;

   struct pipe_blend_color blend_color;

   uint32_t cond_render_q_id;
   GLenum cond_render_gl_mode;

   struct vrend_image_view image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
   uint32_t images_used_mask[PIPE_SHADER_TYPES];

   struct vrend_ssbo ssbo[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
   uint32_t ssbo_used_mask[PIPE_SHADER_TYPES];
};

struct vrend_context {
   char debug_name[64];

   struct list_head sub_ctxs;

   struct vrend_sub_context *sub;
   struct vrend_sub_context *sub0;

   int ctx_id;
   /* has this ctx gotten an error? */
   bool in_error;
   bool ctx_switch_pending;
   bool pstip_inited;

   GLuint pstipple_tex_id;

   enum virgl_ctx_errors last_error;

   /* resources bound to this context */
   struct util_hash_table *res_hash;

   struct list_head active_nontimer_query_list;
   struct list_head ctx_entry;

   struct vrend_shader_cfg shader_cfg;
};

static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle);
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause);
static void vrend_update_viewport_state(struct vrend_context *ctx);
static void vrend_update_scissor_state(struct vrend_context *ctx);
static void vrend_destroy_query_object(void *obj_ptr);
static void vrend_finish_context_switch(struct vrend_context *ctx);
static void vrend_patch_blend_state(struct vrend_context *ctx);
static void vrend_update_frontface_state(struct vrend_context *ctx);
static void vrender_get_glsl_version(int *glsl_version);
static void vrend_destroy_resource_object(void *obj_ptr);
static void vrend_renderer_detach_res_ctx_p(struct vrend_context *ctx, int res_handle);
static void vrend_destroy_program(struct vrend_linked_shader_program *ent);
static void vrend_apply_sampler_state(struct vrend_context *ctx,
                                      struct vrend_resource *res,
                                      uint32_t shader_type,
                                      int id, int sampler_id, uint32_t srgb_decode);
static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples);

void vrend_update_stencil_state(struct vrend_context *ctx);

static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX];

static inline bool vrend_format_can_sample(enum virgl_formats format)
{
   return tex_conv_table[format].bindings & VIRGL_BIND_SAMPLER_VIEW;
}

static inline bool vrend_format_can_render(enum virgl_formats format)
{
   return tex_conv_table[format].bindings & VIRGL_BIND_RENDER_TARGET;
}

static inline bool vrend_format_is_ds(enum virgl_formats format)
{
   return tex_conv_table[format].bindings & VIRGL_BIND_DEPTH_STENCIL;
}

bool vrend_is_ds_format(enum virgl_formats format)
{
   return vrend_format_is_ds(format);
}

bool vrend_format_is_emulated_alpha(enum virgl_formats format)
{
   if (!vrend_state.use_core_profile)
      return false;
   return (format == VIRGL_FORMAT_A8_UNORM ||
           format == VIRGL_FORMAT_A16_UNORM);
}

static bool vrend_format_needs_swizzle(enum virgl_formats format)
{
   return tex_conv_table[format].flags & VIRGL_BIND_NEED_SWIZZLE;
}

static inline const char *pipe_shader_to_prefix(int shader_type)
{
   switch (shader_type) {
   case PIPE_SHADER_VERTEX: return "vs";
   case PIPE_SHADER_FRAGMENT: return "fs";
   case PIPE_SHADER_GEOMETRY: return "gs";
   case PIPE_SHADER_TESS_CTRL: return "tc";
   case PIPE_SHADER_TESS_EVAL: return "te";
   case PIPE_SHADER_COMPUTE: return "cs";
   default:
      return NULL;
   }
}

static const char *vrend_ctx_error_strings[] = { "None", "Unknown", "Illegal shader", "Illegal handle", "Illegal resource", "Illegal surface", "Illegal vertex format", "Illegal command buffer" };

static void __report_context_error(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value)
{
   ctx->in_error = true;
   ctx->last_error = error;
   fprintf(stderr,"%s: context error reported %d \"%s\" %s %d\n", fname, ctx->ctx_id, ctx->debug_name, vrend_ctx_error_strings[error], value);
}
#define report_context_error(ctx, error, value) __report_context_error(__func__, ctx, error, value)

void vrend_report_buffer_error(struct vrend_context *ctx, int cmd)
{
   report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, cmd);
}

#define CORE_PROFILE_WARN_NONE 0
#define CORE_PROFILE_WARN_STIPPLE 1
#define CORE_PROFILE_WARN_POLYGON_MODE 2
#define CORE_PROFILE_WARN_TWO_SIDE 3
#define CORE_PROFILE_WARN_CLAMP 4
#define CORE_PROFILE_WARN_SHADE_MODEL 5

static const char *vrend_core_profile_warn_strings[] = { "None", "Stipple", "Polygon Mode", "Two Side", "Clamping", "Shade Model" };

static void __report_core_warn(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value)
{
   fprintf(stderr,"%s: core profile violation reported %d \"%s\" %s %d\n", fname, ctx->ctx_id, ctx->debug_name, vrend_core_profile_warn_strings[error], value);
}
#define report_core_warn(ctx, error, value) __report_core_warn(__func__, ctx, error, value)


#define GLES_WARN_NONE 0
#define GLES_WARN_STIPPLE 1
#define GLES_WARN_POLYGON_MODE 2
#define GLES_WARN_DEPTH_RANGE 3
#define GLES_WARN_POINT_SIZE 4
#define GLES_WARN_LOD_BIAS 5
//#define GLES_WARN_ free slot 6
#define GLES_WARN_TEXTURE_RECT 7
#define GLES_WARN_OFFSET_LINE 8
#define GLES_WARN_OFFSET_POINT 9
#define GLES_WARN_DEPTH_CLIP 10
#define GLES_WARN_FLATSHADE_FIRST 11
#define GLES_WARN_LINE_SMOOTH 12
#define GLES_WARN_POLY_SMOOTH 13
#define GLES_WARN_DEPTH_CLEAR 14
#define GLES_WARN_LOGIC_OP 15
#define GLES_WARN_TIMESTAMP 16

static const char *vrend_gles_warn_strings[] = {
   "None", "Stipple", "Polygon Mode", "Depth Range", "Point Size", "Lod Bias",
   "<<WARNING #6>>", "Texture Rect", "Offset Line", "Offset Point",
   "Depth Clip", "Flatshade First", "Line Smooth", "Poly Smooth",
   "Depth Clear", "LogicOp", "GL_TIMESTAMP"
};

static void __report_gles_warn(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value)
{
   int id = ctx ? ctx->ctx_id : -1;
   const char *name = ctx ? ctx->debug_name : "NO_CONTEXT";
   fprintf(stderr,"%s: gles violation reported %d \"%s\" %s %d\n", fname, id, name, vrend_gles_warn_strings[error], value);
}
#define report_gles_warn(ctx, error, value) __report_gles_warn(__func__, ctx, error, value)

static void __report_gles_missing_func(const char *fname, struct vrend_context *ctx, const char *missf)
{
   int id = ctx ? ctx->ctx_id : -1;
   const char *name = ctx ? ctx->debug_name : "NO_CONTEXT";
   fprintf(stderr,"%s: gles violation reported %d \"%s\" %s is missing\n", fname, id, name, missf);
}
#define report_gles_missing_func(ctx, missf) __report_gles_missing_func(__func__, ctx, missf)

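/* Walk the feature table once at renderer init: a feature is enabled when
 * the context version meets the table's minimum for the current API, or
 * when any of its listed extensions is present. */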
static void init_features(int gl_ver, int gles_ver)
{
   for (enum features_id id = 0; id < feat_last; id++) {
      if (gl_ver >= feature_list[id].gl_ver ||
          gles_ver >= feature_list[id].gles_ver)
         set_feature(id);
      else {
         for (uint32_t i = 0; i < FEAT_MAX_EXTS; i++) {
            if (!feature_list[id].gl_ext[i])
               break;
            if (epoxy_has_gl_extension(feature_list[id].gl_ext[i])) {
               set_feature(id);
               break;
            }
         }
      }
   }
}

static void vrend_destroy_surface(struct vrend_surface *surf)
{
   if (surf->id != surf->texture->id)
      glDeleteTextures(1, &surf->id);
   vrend_resource_reference(&surf->texture, NULL);
   free(surf);
}

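/* The *_reference() helpers below follow the gallium pipe_reference
 * pattern: drop the old pointer's reference (destroying the object when
 * the count hits zero), then store the new pointer.  Passing NULL simply
 * releases the old reference. */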
static inline void
vrend_surface_reference(struct vrend_surface **ptr, struct vrend_surface *surf)
{
   struct vrend_surface *old_surf = *ptr;

   if (pipe_reference(&(*ptr)->reference, &surf->reference))
      vrend_destroy_surface(old_surf);
   *ptr = surf;
}

static void vrend_destroy_sampler_view(struct vrend_sampler_view *samp)
{
   if (samp->texture->id != samp->id)
      glDeleteTextures(1, &samp->id);
   vrend_resource_reference(&samp->texture, NULL);
   free(samp);
}

static inline void
vrend_sampler_view_reference(struct vrend_sampler_view **ptr, struct vrend_sampler_view *view)
{
   struct vrend_sampler_view *old_view = *ptr;

   if (pipe_reference(&(*ptr)->reference, &view->reference))
      vrend_destroy_sampler_view(old_view);
   *ptr = view;
}

static void vrend_destroy_so_target(struct vrend_so_target *target)
{
   vrend_resource_reference(&target->buffer, NULL);
   free(target);
}

static inline void
vrend_so_target_reference(struct vrend_so_target **ptr, struct vrend_so_target *target)
{
   struct vrend_so_target *old_target = *ptr;

   if (pipe_reference(&(*ptr)->reference, &target->reference))
      vrend_destroy_so_target(old_target);
   *ptr = target;
}

static void vrend_shader_destroy(struct vrend_shader *shader)
{
   struct vrend_linked_shader_program *ent, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &shader->programs, sl[shader->sel->type]) {
      vrend_destroy_program(ent);
   }

   glDeleteShader(shader->id);
   free(shader->glsl_prog);
   free(shader);
}

static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
{
   struct vrend_shader *p = sel->current, *c;
   unsigned i;
   while (p) {
      c = p->next_variant;
      vrend_shader_destroy(p);
      p = c;
   }
   if (sel->sinfo.so_names)
      for (i = 0; i < sel->sinfo.so_info.num_outputs; i++)
         free(sel->sinfo.so_names[i]);
   free(sel->tmp_buf);
   free(sel->sinfo.so_names);
   free(sel->sinfo.interpinfo);
   free(sel->sinfo.sampler_arrays);
   free(sel->sinfo.image_arrays);
   free(sel->tokens);
   free(sel);
}

static bool vrend_compile_shader(struct vrend_context *ctx,
                                 struct vrend_shader *shader)
{
   GLint param;
   glShaderSource(shader->id, 1, (const char **)&shader->glsl_prog, NULL);
   glCompileShader(shader->id);
   glGetShaderiv(shader->id, GL_COMPILE_STATUS, &param);
   if (param == GL_FALSE) {
      char infolog[65536];
      int len;
      glGetShaderInfoLog(shader->id, 65536, &len, infolog);
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
      fprintf(stderr,"shader failed to compile\n%s\n", infolog);
      fprintf(stderr,"GLSL:\n%s\n", shader->glsl_prog);
      return false;
   }
   return true;
}

static inline void
vrend_shader_state_reference(struct vrend_shader_selector **ptr, struct vrend_shader_selector *shader)
{
   struct vrend_shader_selector *old_shader = *ptr;

   if (pipe_reference(&(*ptr)->reference, &shader->reference))
      vrend_destroy_shader_selector(old_shader);
   *ptr = shader;
}

void
vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings)
{
   tex_conv_table[entry->format] = *entry;
   tex_conv_table[entry->format].bindings = bindings;
}

void
vrend_insert_format_swizzle(int override_format, struct vrend_format_table *entry, uint32_t bindings, uint8_t swizzle[4])
{
   int i;
   tex_conv_table[override_format] = *entry;
   tex_conv_table[override_format].bindings = bindings;
   tex_conv_table[override_format].flags = VIRGL_BIND_NEED_SWIZZLE;
   for (i = 0; i < 4; i++)
      tex_conv_table[override_format].swizzle[i] = swizzle[i];
}

const struct vrend_format_table *
vrend_get_format_table_entry(enum virgl_formats format)
{
   return &tex_conv_table[format];
}

static bool vrend_is_timer_query(GLenum gltype)
{
   return gltype == GL_TIMESTAMP ||
          gltype == GL_TIME_ELAPSED;
}

static void vrend_use_program(struct vrend_context *ctx, GLuint program_id)
{
   if (ctx->sub->program_id != program_id) {
      glUseProgram(program_id);
      ctx->sub->program_id = program_id;
   }
}

static void vrend_init_pstipple_texture(struct vrend_context *ctx)
{
   glGenTextures(1, &ctx->pstipple_tex_id);
   glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
   glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 32, 32, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

   ctx->pstip_inited = true;
}

static void vrend_blend_enable(struct vrend_context *ctx, bool blend_enable)
{
   if (ctx->sub->blend_enabled != blend_enable) {
      ctx->sub->blend_enabled = blend_enable;
      if (blend_enable)
         glEnable(GL_BLEND);
      else
         glDisable(GL_BLEND);
   }
}

static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable)
{
   if (ctx->sub->depth_test_enabled != depth_test_enable) {
      ctx->sub->depth_test_enabled = depth_test_enable;
      if (depth_test_enable)
         glEnable(GL_DEPTH_TEST);
      else
         glDisable(GL_DEPTH_TEST);
   }
}

static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_enable)
{
   if (vrend_state.use_core_profile) {
      /* handled in shaders */
      return;
   }
   if (ctx->sub->alpha_test_enabled != alpha_test_enable) {
      ctx->sub->alpha_test_enabled = alpha_test_enable;
      if (alpha_test_enable)
         glEnable(GL_ALPHA_TEST);
      else
         glDisable(GL_ALPHA_TEST);
   }
}

static void vrend_stencil_test_enable(struct vrend_context *ctx, bool stencil_test_enable)
{
   if (ctx->sub->stencil_test_enabled != stencil_test_enable) {
      ctx->sub->stencil_test_enabled = stencil_test_enable;
      if (stencil_test_enable)
         glEnable(GL_STENCIL_TEST);
      else
         glDisable(GL_STENCIL_TEST);
   }
}

static void dump_stream_out(struct pipe_stream_output_info *so)
{
   unsigned i;
   if (!so)
      return;
   printf("streamout: %d\n", so->num_outputs);
   printf("strides: ");
   for (i = 0; i < 4; i++)
      printf("%d ", so->stride[i]);
   printf("\n");
   printf("outputs:\n");
   for (i = 0; i < so->num_outputs; i++) {
      printf("\t%d: reg: %d sc: %d, nc: %d ob: %d do: %d st: %d\n",
             i,
             so->output[i].register_index,
             so->output[i].start_component,
             so->output[i].num_components,
             so->output[i].output_buffer,
             so->output[i].dst_offset,
             so->output[i].stream);
   }
}

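/* Translate a pending component skip count into the next
 * "gl_SkipComponentsN" varying name (N = 1..4), consuming up to four
 * components per call; callers loop until the count reaches zero. */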
static char *get_skip_str(int *skip_val)
{
   char *start_skip = NULL;
   if (*skip_val < 0) {
      *skip_val = 0;
      return NULL;
   }

   if (*skip_val == 1) {
      start_skip = strdup("gl_SkipComponents1");
      *skip_val -= 1;
   } else if (*skip_val == 2) {
      start_skip = strdup("gl_SkipComponents2");
      *skip_val -= 2;
   } else if (*skip_val == 3) {
      start_skip = strdup("gl_SkipComponents3");
      *skip_val -= 3;
   } else if (*skip_val >= 4) {
      start_skip = strdup("gl_SkipComponents4");
      *skip_val -= 4;
   }
   return start_skip;
}

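/* Convert the gallium stream-output layout into the varyings list that
 * glTransformFeedbackVaryings() expects in interleaved mode:
 * "gl_NextBuffer" switches output buffers and "gl_SkipComponentsN" pads
 * the gaps between dst_offsets, so each output lands at its requested
 * offset within its buffer's stride. */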
static void set_stream_out_varyings(int prog_id, struct vrend_shader_info *sinfo)
{
   struct pipe_stream_output_info *so = &sinfo->so_info;
   char *varyings[PIPE_MAX_SHADER_OUTPUTS*2];
   int j;
   uint i, n_outputs = 0;
   int last_buffer = 0;
   char *start_skip;
   int buf_offset = 0;
   int skip;
   if (!so->num_outputs)
      return;

   if (vrend_dump_shaders)
      dump_stream_out(so);

   for (i = 0; i < so->num_outputs; i++) {
      if (last_buffer != so->output[i].output_buffer) {

         skip = so->stride[last_buffer] - buf_offset;
         while (skip) {
            start_skip = get_skip_str(&skip);
            if (start_skip)
               varyings[n_outputs++] = start_skip;
         }
         for (j = last_buffer; j < so->output[i].output_buffer; j++)
            varyings[n_outputs++] = strdup("gl_NextBuffer");
         last_buffer = so->output[i].output_buffer;
         buf_offset = 0;
      }

      skip = so->output[i].dst_offset - buf_offset;
      while (skip) {
         start_skip = get_skip_str(&skip);
         if (start_skip)
            varyings[n_outputs++] = start_skip;
      }
      buf_offset = so->output[i].dst_offset;

      buf_offset += so->output[i].num_components;
      if (sinfo->so_names[i])
         varyings[n_outputs++] = strdup(sinfo->so_names[i]);
   }

   skip = so->stride[last_buffer] - buf_offset;
   while (skip) {
      start_skip = get_skip_str(&skip);
      if (start_skip)
         varyings[n_outputs++] = start_skip;
   }

   glTransformFeedbackVaryings(prog_id, n_outputs,
                               (const GLchar **)varyings, GL_INTERLEAVED_ATTRIBS_EXT);

   for (i = 0; i < n_outputs; i++)
      if (varyings[i])
         free(varyings[i]);
}

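/* Resolve the uniform locations of every sampler this shader stage uses,
 * including the extra shadow-mask/shadow-add uniforms emitted for shadow
 * samplers; samplers declared as arrays are looked up per element. */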
static void bind_sampler_locs(struct vrend_linked_shader_program *sprog,
                              int id)
{
   if (sprog->ss[id]->sel->sinfo.samplers_used_mask) {
      uint32_t mask = sprog->ss[id]->sel->sinfo.samplers_used_mask;
      int nsamp = util_bitcount(sprog->ss[id]->sel->sinfo.samplers_used_mask);
      int index;
      sprog->shadow_samp_mask[id] = sprog->ss[id]->sel->sinfo.shadow_samp_mask;
      if (sprog->ss[id]->sel->sinfo.shadow_samp_mask) {
         sprog->shadow_samp_mask_locs[id] = calloc(nsamp, sizeof(uint32_t));
         sprog->shadow_samp_add_locs[id] = calloc(nsamp, sizeof(uint32_t));
      } else {
         sprog->shadow_samp_mask_locs[id] = sprog->shadow_samp_add_locs[id] = NULL;
      }
      sprog->samp_locs[id] = calloc(nsamp, sizeof(uint32_t));
      if (sprog->samp_locs[id]) {
         const char *prefix = pipe_shader_to_prefix(id);
         index = 0;
         while(mask) {
            uint32_t i = u_bit_scan(&mask);
            char name[64];
            if (sprog->ss[id]->sel->sinfo.num_sampler_arrays) {
               int arr_idx = shader_lookup_sampler_array(&sprog->ss[id]->sel->sinfo, i);
               snprintf(name, 32, "%ssamp%d[%d]", prefix, arr_idx, i - arr_idx);
            } else
               snprintf(name, 32, "%ssamp%d", prefix, i);
            sprog->samp_locs[id][index] = glGetUniformLocation(sprog->id, name);
            if (sprog->ss[id]->sel->sinfo.shadow_samp_mask & (1 << i)) {
               snprintf(name, 32, "%sshadmask%d", prefix, i);
               sprog->shadow_samp_mask_locs[id][index] = glGetUniformLocation(sprog->id, name);
               snprintf(name, 32, "%sshadadd%d", prefix, i);
               sprog->shadow_samp_add_locs[id][index] = glGetUniformLocation(sprog->id, name);
            }
            index++;
         }
      }
   } else {
      sprog->samp_locs[id] = NULL;
      sprog->shadow_samp_mask_locs[id] = NULL;
      sprog->shadow_samp_add_locs[id] = NULL;
      sprog->shadow_samp_mask[id] = 0;
   }
   sprog->samplers_used_mask[id] = sprog->ss[id]->sel->sinfo.samplers_used_mask;
}

static void bind_const_locs(struct vrend_linked_shader_program *sprog,
                            int id)
{
   if (sprog->ss[id]->sel->sinfo.num_consts) {
      sprog->const_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_consts, sizeof(uint32_t));
      if (sprog->const_locs[id]) {
         const char *prefix = pipe_shader_to_prefix(id);
         for (int i = 0; i < sprog->ss[id]->sel->sinfo.num_consts; i++) {
            char name[32];
            snprintf(name, 32, "%sconst0[%d]", prefix, i);
            sprog->const_locs[id][i] = glGetUniformLocation(sprog->id, name);
         }
      }
   } else
      sprog->const_locs[id] = NULL;
}

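/* Resolve uniform-block indices for the stage's UBOs.  When the shader
 * indexes its UBOs indirectly they are emitted as a single array
 * ("<prefix>ubo[i]"), otherwise each block gets its own "<prefix>uboN"
 * name. */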
static void bind_ubo_locs(struct vrend_linked_shader_program *sprog,
                          int id)
{
   if (!has_feature(feat_ubo))
      return;
   if (sprog->ss[id]->sel->sinfo.num_ubos) {
      const char *prefix = pipe_shader_to_prefix(id);

      sprog->ubo_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_ubos, sizeof(uint32_t));
      for (int i = 0; i < sprog->ss[id]->sel->sinfo.num_ubos; i++) {
         int ubo_idx = sprog->ss[id]->sel->sinfo.ubo_idx[i];
         char name[32];
         if (sprog->ss[id]->sel->sinfo.ubo_indirect)
            snprintf(name, 32, "%subo[%d]", prefix, ubo_idx - 1);
         else
            snprintf(name, 32, "%subo%d", prefix, ubo_idx);

         sprog->ubo_locs[id][i] = glGetUniformBlockIndex(sprog->id, name);
      }
   } else
      sprog->ubo_locs[id] = NULL;
}

static void bind_ssbo_locs(struct vrend_linked_shader_program *sprog,
                           int id)
{
   int i;
   char name[32];
   if (!has_feature(feat_ssbo))
      return;
   if (sprog->ss[id]->sel->sinfo.ssbo_used_mask) {
      const char *prefix = pipe_shader_to_prefix(id);
      uint32_t mask = sprog->ss[id]->sel->sinfo.ssbo_used_mask;
      sprog->ssbo_locs[id] = calloc(util_last_bit(mask), sizeof(uint32_t));

      while (mask) {
         i = u_bit_scan(&mask);
         snprintf(name, 32, "%sssbo%d", prefix, i);
         sprog->ssbo_locs[id][i] = glGetProgramResourceIndex(sprog->id, GL_SHADER_STORAGE_BLOCK, name);
      }
   } else
      sprog->ssbo_locs[id] = NULL;
   sprog->ssbo_used_mask[id] = sprog->ss[id]->sel->sinfo.ssbo_used_mask;
}

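/* Resolve uniform locations for shader images.  The location table is
 * indexed by image slot, so with image arrays each element is resolved
 * through the array declaration ("<prefix>img<first>[j]"), and unused
 * slots are left at -1. */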
static void bind_image_locs(struct vrend_linked_shader_program *sprog,
                            int id)
{
   int i;
   char name[32];
   const char *prefix = pipe_shader_to_prefix(id);

   if (!has_feature(feat_images))
      return;

   uint32_t mask = sprog->ss[id]->sel->sinfo.images_used_mask;
   int nsamp = util_last_bit(mask);
   if (nsamp) {
      sprog->img_locs[id] = calloc(nsamp, sizeof(GLint));
      if (!sprog->img_locs[id])
         return;
   } else
      sprog->img_locs[id] = NULL;

   if (sprog->ss[id]->sel->sinfo.num_image_arrays) {
      for (i = 0; i < sprog->ss[id]->sel->sinfo.num_image_arrays; i++) {
         struct vrend_array *img_array = &sprog->ss[id]->sel->sinfo.image_arrays[i];
         for (int j = 0; j < img_array->array_size; j++) {
            snprintf(name, 32, "%simg%d[%d]", prefix, img_array->first, j);
            sprog->img_locs[id][img_array->first + j] = glGetUniformLocation(sprog->id, name);
            if (sprog->img_locs[id][img_array->first + j] == -1)
               fprintf(stderr, "failed to get uniform loc for image %s\n", name);
         }
      }
   } else if (mask) {
      for (i = 0; i < nsamp; i++) {
         if (mask & (1 << i)) {
            snprintf(name, 32, "%simg%d", prefix, i);
            sprog->img_locs[id][i] = glGetUniformLocation(sprog->id, name);
            if (sprog->img_locs[id][i] == -1)
               fprintf(stderr, "failed to get uniform loc for image %s\n", name);
         } else {
            sprog->img_locs[id][i] = -1;
         }
      }
   }
   sprog->images_used_mask[id] = mask;
}

static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_context *ctx,
                                                                 struct vrend_shader *cs)
{
   struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
   GLuint prog_id;
   GLint lret;
   prog_id = glCreateProgram();
   glAttachShader(prog_id, cs->id);
   glLinkProgram(prog_id);

   glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
   if (lret == GL_FALSE) {
      char infolog[65536];
      int len;
      glGetProgramInfoLog(prog_id, 65536, &len, infolog);
      fprintf(stderr,"got error linking\n%s\n", infolog);
      /* dump shaders */
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
      fprintf(stderr,"compute shader: %d GLSL\n%s\n", cs->id, cs->glsl_prog);
      glDeleteProgram(prog_id);
      free(sprog);
      return NULL;
   }
   sprog->ss[PIPE_SHADER_COMPUTE] = cs;

   list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs);
   sprog->id = prog_id;
   list_addtail(&sprog->head, &ctx->sub->programs);

   bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE);
   bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE);
   bind_ssbo_locs(sprog, PIPE_SHADER_COMPUTE);
   bind_const_locs(sprog, PIPE_SHADER_COMPUTE);
   bind_image_locs(sprog, PIPE_SHADER_COMPUTE);
   return sprog;
}

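/* Link a full graphics program from the per-stage shaders.  Before
 * linking, the last vertex-pipeline stage may need its GLSL patched and
 * recompiled so its output interpolants match the fragment shader it is
 * paired with.  Dual-source blend outputs and (when vertex_attrib_binding
 * is available) attribute locations are bound before the link, and all
 * uniform/sampler/UBO/SSBO/image locations are resolved after it. */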
static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
                                                              struct vrend_shader *vs,
                                                              struct vrend_shader *fs,
                                                              struct vrend_shader *gs,
                                                              struct vrend_shader *tcs,
                                                              struct vrend_shader *tes)
{
   struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
   char name[64];
   int i;
   GLuint prog_id;
   GLint lret;
   int id;
   int last_shader;
   bool do_patch = false;
   if (!sprog)
      return NULL;

   /* need to rewrite VS code to add interpolation params */
   if (gs && gs->compiled_fs_id != fs->id)
      do_patch = true;
   if (!gs && tes && tes->compiled_fs_id != fs->id)
      do_patch = true;
   if (!gs && !tes && vs->compiled_fs_id != fs->id)
      do_patch = true;

   if (do_patch) {
      bool ret;

      if (gs)
         vrend_patch_vertex_shader_interpolants(&ctx->shader_cfg, gs->glsl_prog,
                                                &gs->sel->sinfo,
                                                &fs->sel->sinfo, "gso", fs->key.flatshade);
      else if (tes)
         vrend_patch_vertex_shader_interpolants(&ctx->shader_cfg, tes->glsl_prog,
                                                &tes->sel->sinfo,
                                                &fs->sel->sinfo, "teo", fs->key.flatshade);
      else
         vrend_patch_vertex_shader_interpolants(&ctx->shader_cfg, vs->glsl_prog,
                                                &vs->sel->sinfo,
                                                &fs->sel->sinfo, "vso", fs->key.flatshade);
      ret = vrend_compile_shader(ctx, gs ? gs : (tes ? tes : vs));
      if (ret == false) {
         glDeleteShader(gs ? gs->id : (tes ? tes->id : vs->id));
         free(sprog);
         return NULL;
      }
      if (gs)
         gs->compiled_fs_id = fs->id;
      else if (tes)
         tes->compiled_fs_id = fs->id;
      else
         vs->compiled_fs_id = fs->id;
   }

   prog_id = glCreateProgram();
   glAttachShader(prog_id, vs->id);
   if (tcs && tcs->id > 0)
      glAttachShader(prog_id, tcs->id);
   if (tes && tes->id > 0)
      glAttachShader(prog_id, tes->id);

   if (gs) {
      if (gs->id > 0)
         glAttachShader(prog_id, gs->id);
      set_stream_out_varyings(prog_id, &gs->sel->sinfo);
   } else if (tes)
      set_stream_out_varyings(prog_id, &tes->sel->sinfo);
   else
      set_stream_out_varyings(prog_id, &vs->sel->sinfo);
   glAttachShader(prog_id, fs->id);

   if (fs->sel->sinfo.num_outputs > 1) {
      if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
         glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
         glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
         sprog->dual_src_linked = true;
      } else {
         glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
         glBindFragDataLocationIndexed(prog_id, 1, 0, "fsout_c1");
         sprog->dual_src_linked = false;
      }
   } else
      sprog->dual_src_linked = false;

   if (has_feature(feat_gles31_vertex_attrib_binding)) {
      uint32_t mask = vs->sel->sinfo.attrib_input_mask;
      while (mask) {
         i = u_bit_scan(&mask);
         snprintf(name, 32, "in_%d", i);
         glBindAttribLocation(prog_id, i, name);
      }
   }

   glLinkProgram(prog_id);

   glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
   if (lret == GL_FALSE) {
      char infolog[65536];
      int len;
      glGetProgramInfoLog(prog_id, 65536, &len, infolog);
      fprintf(stderr,"got error linking\n%s\n", infolog);
      /* dump shaders */
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
      fprintf(stderr,"vert shader: %d GLSL\n%s\n", vs->id, vs->glsl_prog);
      if (gs)
         fprintf(stderr,"geom shader: %d GLSL\n%s\n", gs->id, gs->glsl_prog);
      fprintf(stderr,"frag shader: %d GLSL\n%s\n", fs->id, fs->glsl_prog);
      glDeleteProgram(prog_id);
      free(sprog);
      return NULL;
   }

   sprog->ss[PIPE_SHADER_VERTEX] = vs;
   sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
   sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
   sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs;
   sprog->ss[PIPE_SHADER_TESS_EVAL] = tes;

   list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs);
   list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs);
   if (gs)
      list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs);
   if (tcs)
      list_add(&sprog->sl[PIPE_SHADER_TESS_CTRL], &tcs->programs);
   if (tes)
      list_add(&sprog->sl[PIPE_SHADER_TESS_EVAL], &tes->programs);

   last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
   sprog->id = prog_id;

   list_addtail(&sprog->head, &ctx->sub->programs);

   if (fs->key.pstipple_tex)
      sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
   else
      sprog->fs_stipple_loc = -1;
   sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y");
   for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
      if (!sprog->ss[id])
         continue;

      bind_sampler_locs(sprog, id);
      bind_const_locs(sprog, id);
      bind_ubo_locs(sprog, id);
      bind_image_locs(sprog, id);
      bind_ssbo_locs(sprog, id);
   }

   if (!has_feature(feat_gles31_vertex_attrib_binding)) {
      if (vs->sel->sinfo.num_inputs) {
         sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t));
         if (sprog->attrib_locs) {
            for (i = 0; i < vs->sel->sinfo.num_inputs; i++) {
               snprintf(name, 32, "in_%d", i);
               sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name);
            }
         }
      } else
         sprog->attrib_locs = NULL;
   }

   if (vs->sel->sinfo.num_ucp) {
      for (i = 0; i < vs->sel->sinfo.num_ucp; i++) {
         snprintf(name, 32, "clipp[%d]", i);
         sprog->clip_locs[i] = glGetUniformLocation(prog_id, name);
      }
   }
   return sprog;
}

static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend_context *ctx,
                                                                    GLuint cs_id)
{
   struct vrend_linked_shader_program *ent;
   LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
      if (!ent->ss[PIPE_SHADER_COMPUTE])
         continue;
      if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id)
         return ent;
   }
   return NULL;
}

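/* Look up a previously linked graphics program by the ids of its stage
 * shaders (and its dual-source linkage), so re-binding the same shader
 * combination reuses the cached GL program instead of relinking. */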
static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
                                                                 GLuint vs_id,
                                                                 GLuint fs_id,
                                                                 GLuint gs_id,
                                                                 GLuint tcs_id,
                                                                 GLuint tes_id,
                                                                 bool dual_src)
{
   struct vrend_linked_shader_program *ent;
   LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
      if (ent->dual_src_linked != dual_src)
         continue;
      if (ent->ss[PIPE_SHADER_COMPUTE])
         continue;
      if (ent->ss[PIPE_SHADER_VERTEX]->id != vs_id)
         continue;
      if (ent->ss[PIPE_SHADER_FRAGMENT]->id != fs_id)
         continue;
      if (ent->ss[PIPE_SHADER_GEOMETRY] &&
          ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id)
         continue;
      if (ent->ss[PIPE_SHADER_TESS_CTRL] &&
          ent->ss[PIPE_SHADER_TESS_CTRL]->id != tcs_id)
         continue;
      if (ent->ss[PIPE_SHADER_TESS_EVAL] &&
          ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id)
         continue;
      return ent;
   }
   return NULL;
}

static void vrend_destroy_program(struct vrend_linked_shader_program *ent)
{
   int i;
   glDeleteProgram(ent->id);
   list_del(&ent->head);

   for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_COMPUTE; i++) {
      if (ent->ss[i])
         list_del(&ent->sl[i]);
      free(ent->shadow_samp_mask_locs[i]);
      free(ent->shadow_samp_add_locs[i]);
      free(ent->samp_locs[i]);
      free(ent->ssbo_locs[i]);
      free(ent->img_locs[i]);
      free(ent->const_locs[i]);
      free(ent->ubo_locs[i]);
   }
   free(ent->attrib_locs);
   free(ent);
}

static void vrend_free_programs(struct vrend_sub_context *sub)
{
   struct vrend_linked_shader_program *ent, *tmp;

   if (LIST_IS_EMPTY(&sub->programs))
      return;

   LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
      vrend_destroy_program(ent);
   }
}

static void vrend_destroy_streamout_object(struct vrend_streamout_object *obj)
{
   unsigned i;
   list_del(&obj->head);
   for (i = 0; i < obj->num_targets; i++)
      vrend_so_target_reference(&obj->so_targets[i], NULL);
   if (has_feature(feat_transform_feedback2))
      glDeleteTransformFeedbacks(1, &obj->id);
   FREE(obj);
}

int vrend_create_surface(struct vrend_context *ctx,
                         uint32_t handle,
                         uint32_t res_handle, uint32_t format,
                         uint32_t val0, uint32_t val1)
{
   struct vrend_surface *surf;
   struct vrend_resource *res;
   uint32_t ret_handle;

   if (format >= PIPE_FORMAT_COUNT) {
      return EINVAL;
   }

   res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
   if (!res) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
      return EINVAL;
   }

   surf = CALLOC_STRUCT(vrend_surface);
   if (!surf)
      return ENOMEM;

   surf->res_handle = res_handle;
   surf->format = format;
   surf->val0 = val0;
   surf->val1 = val1;
   surf->id = res->id;

   if (has_feature(feat_texture_view) && !res->is_buffer) {
      /* We don't need texture views for buffer objects. Otherwise we only
       * need a texture view if either
       * a) the formats differ between the surface and the base texture, or
       * b) we have to map a sub-range of more than one layer to the surface.
       * GL can use a single layer fine without a view, and it can map the
       * whole texture fine. In those cases we don't create a texture view.
       */
      int first_layer = surf->val1 & 0xffff;
      int last_layer = (surf->val1 >> 16) & 0xffff;

      if ((first_layer != last_layer &&
           (first_layer != 0 || (last_layer != (int)util_max_layer(&res->base, surf->val0)))) ||
          surf->format != res->base.format) {
         GLenum internalformat = tex_conv_table[surf->format].internalformat;
         glGenTextures(1, &surf->id);
         glTextureView(surf->id, res->target, res->id, internalformat,
                       0, res->base.last_level + 1,
                       first_layer, last_layer - first_layer + 1);
      }
   }

   pipe_reference_init(&surf->reference, 1);

   vrend_resource_reference(&surf->texture, res);

   ret_handle = vrend_renderer_object_insert(ctx, surf, sizeof(*surf), handle, VIRGL_OBJECT_SURFACE);
   if (ret_handle == 0) {
      FREE(surf);
      return ENOMEM;
   }
   return 0;
}

vrend_destroy_surface_object(void * obj_ptr)1557 static void vrend_destroy_surface_object(void *obj_ptr)
1558 {
1559 struct vrend_surface *surface = obj_ptr;
1560
1561 vrend_surface_reference(&surface, NULL);
1562 }
1563
vrend_destroy_sampler_view_object(void * obj_ptr)1564 static void vrend_destroy_sampler_view_object(void *obj_ptr)
1565 {
1566 struct vrend_sampler_view *samp = obj_ptr;
1567
1568 vrend_sampler_view_reference(&samp, NULL);
1569 }
1570
vrend_destroy_so_target_object(void * obj_ptr)1571 static void vrend_destroy_so_target_object(void *obj_ptr)
1572 {
1573 struct vrend_so_target *target = obj_ptr;
1574 struct vrend_sub_context *sub_ctx = target->sub_ctx;
1575 struct vrend_streamout_object *obj, *tmp;
1576 bool found;
1577 unsigned i;
1578
1579 LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub_ctx->streamout_list, head) {
1580 found = false;
1581 for (i = 0; i < obj->num_targets; i++) {
1582 if (obj->so_targets[i] == target) {
1583 found = true;
1584 break;
1585 }
1586 }
1587 if (found) {
1588 if (obj == sub_ctx->current_so)
1589 sub_ctx->current_so = NULL;
1590 if (obj->xfb_state == XFB_STATE_PAUSED) {
1591 if (has_feature(feat_transform_feedback2))
1592 glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
1593 glEndTransformFeedback();
1594 if (sub_ctx->current_so && has_feature(feat_transform_feedback2))
1595 glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, sub_ctx->current_so->id);
1596 }
1597 vrend_destroy_streamout_object(obj);
1598 }
1599 }
1600
1601 vrend_so_target_reference(&target, NULL);
1602 }
1603
vrend_destroy_vertex_elements_object(void * obj_ptr)1604 static void vrend_destroy_vertex_elements_object(void *obj_ptr)
1605 {
1606 struct vrend_vertex_element_array *v = obj_ptr;
1607
1608 if (has_feature(feat_gles31_vertex_attrib_binding)) {
1609 glDeleteVertexArrays(1, &v->id);
1610 }
1611 FREE(v);
1612 }
1613
vrend_destroy_sampler_state_object(void * obj_ptr)1614 static void vrend_destroy_sampler_state_object(void *obj_ptr)
1615 {
1616 struct vrend_sampler_state *state = obj_ptr;
1617
1618 if (has_feature(feat_samplers))
1619 glDeleteSamplers(1, &state->id);
1620 FREE(state);
1621 }
1622
static GLuint convert_wrap(int wrap)
{
   switch (wrap) {
   case PIPE_TEX_WRAP_REPEAT: return GL_REPEAT;
   case PIPE_TEX_WRAP_CLAMP:
      if (vrend_state.use_core_profile == false)
         return GL_CLAMP;
      else
         return GL_CLAMP_TO_EDGE;

   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER;

   case PIPE_TEX_WRAP_MIRROR_REPEAT: return GL_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return GL_MIRROR_CLAMP_EXT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return GL_MIRROR_CLAMP_TO_EDGE_EXT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return GL_MIRROR_CLAMP_TO_BORDER_EXT;
   default:
      assert(0);
      return -1;
   }
}

static inline GLenum convert_mag_filter(unsigned int filter)
{
   if (filter == PIPE_TEX_FILTER_NEAREST)
      return GL_NEAREST;
   return GL_LINEAR;
}

static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_filter)
{
   if (mip_filter == PIPE_TEX_MIPFILTER_NONE)
      return convert_mag_filter(filter);
   else if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (filter == PIPE_TEX_FILTER_NEAREST)
         return GL_NEAREST_MIPMAP_LINEAR;
      else
         return GL_LINEAR_MIPMAP_LINEAR;
   } else if (mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
      if (filter == PIPE_TEX_FILTER_NEAREST)
         return GL_NEAREST_MIPMAP_NEAREST;
      else
         return GL_LINEAR_MIPMAP_NEAREST;
   }
   assert(0);
   return 0;
}

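/* Note the compare-func translation below relies on the gallium
 * PIPE_FUNC_* values (NEVER..ALWAYS = 0..7) lining up with the
 * consecutive GL enums GL_NEVER..GL_ALWAYS (0x0200..0x0207), so
 * GL_NEVER + templ->compare_func yields the matching GL func.
 */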
int vrend_create_sampler_state(struct vrend_context *ctx,
                               uint32_t handle,
                               struct pipe_sampler_state *templ)
{
   struct vrend_sampler_state *state = CALLOC_STRUCT(vrend_sampler_state);
   int ret_handle;

   if (!state)
      return ENOMEM;

   state->base = *templ;

   if (has_feature(feat_samplers)) {
      glGenSamplers(1, &state->id);

      glSamplerParameteri(state->id, GL_TEXTURE_WRAP_S, convert_wrap(templ->wrap_s));
      glSamplerParameteri(state->id, GL_TEXTURE_WRAP_T, convert_wrap(templ->wrap_t));
      glSamplerParameteri(state->id, GL_TEXTURE_WRAP_R, convert_wrap(templ->wrap_r));
      glSamplerParameterf(state->id, GL_TEXTURE_MIN_FILTER, convert_min_filter(templ->min_img_filter, templ->min_mip_filter));
      glSamplerParameterf(state->id, GL_TEXTURE_MAG_FILTER, convert_mag_filter(templ->mag_img_filter));
      glSamplerParameterf(state->id, GL_TEXTURE_MIN_LOD, templ->min_lod);
      glSamplerParameterf(state->id, GL_TEXTURE_MAX_LOD, templ->max_lod);
      glSamplerParameteri(state->id, GL_TEXTURE_COMPARE_MODE, templ->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE);
      glSamplerParameteri(state->id, GL_TEXTURE_COMPARE_FUNC, GL_NEVER + templ->compare_func);
      if (vrend_state.use_gles) {
         if (templ->lod_bias != 0.0f) {
            report_gles_warn(ctx, GLES_WARN_LOD_BIAS, 0);
         }
      } else {
         glSamplerParameteri(state->id, GL_TEXTURE_CUBE_MAP_SEAMLESS, templ->seamless_cube_map);
         glSamplerParameterf(state->id, GL_TEXTURE_LOD_BIAS, templ->lod_bias);
      }

      glSamplerParameterIuiv(state->id, GL_TEXTURE_BORDER_COLOR, templ->border_color.ui);
   }
   ret_handle = vrend_renderer_object_insert(ctx, state, sizeof(struct vrend_sampler_state), handle,
                                             VIRGL_OBJECT_SAMPLER_STATE);
   if (!ret_handle) {
      if (has_feature(feat_samplers))
         glDeleteSamplers(1, &state->id);
      FREE(state);
      return ENOMEM;
   }
   return 0;
}

static inline GLenum to_gl_swizzle(int swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_RED: return GL_RED;
   case PIPE_SWIZZLE_GREEN: return GL_GREEN;
   case PIPE_SWIZZLE_BLUE: return GL_BLUE;
   case PIPE_SWIZZLE_ALPHA: return GL_ALPHA;
   case PIPE_SWIZZLE_ZERO: return GL_ZERO;
   case PIPE_SWIZZLE_ONE: return GL_ONE;
   default:
      assert(0);
      return 0;
   }
}

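/* Sampler-view creation. The protocol packs several fields:
 *   format: bits 0-23 hold the virgl format, bits 24-31 the TGSI
 *           texture target (converted via tgsitargettogltarget()).
 *   swizzle_packed: 3 bits per channel (R,G,B,A) of PIPE_SWIZZLE_* values.
 *   val0/val1: for textures, val0 packs first/last layer (16 bits each)
 *           and val1 packs first/last level (8 bits each); for buffer
 *           textures, val0/val1 are the first/last element instead.
 */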
int vrend_create_sampler_view(struct vrend_context *ctx,
                              uint32_t handle,
                              uint32_t res_handle, uint32_t format,
                              uint32_t val0, uint32_t val1, uint32_t swizzle_packed)
{
   struct vrend_sampler_view *view;
   struct vrend_resource *res;
   int ret_handle;
   uint8_t swizzle[4];

   res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
   if (!res) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
      return EINVAL;
   }

   view = CALLOC_STRUCT(vrend_sampler_view);
   if (!view)
      return ENOMEM;

   pipe_reference_init(&view->reference, 1);
   view->format = format & 0xffffff;
   view->target = tgsitargettogltarget((format >> 24) & 0xff, res->base.nr_samples);
   view->val0 = val0;
   view->val1 = val1;
   view->cur_base = -1;
   view->cur_max = 10000;

   swizzle[0] = swizzle_packed & 0x7;
   swizzle[1] = (swizzle_packed >> 3) & 0x7;
   swizzle[2] = (swizzle_packed >> 6) & 0x7;
   swizzle[3] = (swizzle_packed >> 9) & 0x7;

   vrend_resource_reference(&view->texture, res);

   view->id = view->texture->id;
   if (!view->target)
      view->target = view->texture->target;

   if (has_feature(feat_texture_view) && !view->texture->is_buffer) {
      enum pipe_format format;
      bool needs_view = false;

      /*
       * Need to use a texture view if the gallium
       * view target differs from the underlying
       * texture target.
       */
      if (view->target != view->texture->target)
         needs_view = true;

      /*
       * If the formats are different and this isn't
       * a DS texture a view is required.
       * DS are special as they use different gallium
       * formats for DS views into a combined resource.
       * GL texture views can't be used for this; stencil
       * texturing is used instead. For DS formats
       * always program the underlying DS format as a
       * view could be required for layers.
       */
      format = view->format;
      if (util_format_is_depth_or_stencil(view->texture->base.format))
         format = view->texture->base.format;
      else if (view->format != view->texture->base.format)
         needs_view = true;
      if (needs_view) {
         glGenTextures(1, &view->id);
         GLenum internalformat = tex_conv_table[format].internalformat;
         unsigned base_layer = view->val0 & 0xffff;
         unsigned max_layer = (view->val0 >> 16) & 0xffff;
         view->cur_base = view->val1 & 0xff;
         view->cur_max = (view->val1 >> 8) & 0xff;
         glTextureView(view->id, view->target, view->texture->id, internalformat,
                       view->cur_base, (view->cur_max - view->cur_base) + 1,
                       base_layer, max_layer - base_layer + 1);
      }
   }
   view->srgb_decode = GL_DECODE_EXT;
   if (view->format != view->texture->base.format) {
      if (util_format_is_srgb(view->texture->base.format) &&
          !util_format_is_srgb(view->format))
         view->srgb_decode = GL_SKIP_DECODE_EXT;
   }

   if (!(util_format_has_alpha(view->format) || util_format_is_depth_or_stencil(view->format))) {
      if (swizzle[0] == PIPE_SWIZZLE_ALPHA)
         swizzle[0] = PIPE_SWIZZLE_ONE;
      if (swizzle[1] == PIPE_SWIZZLE_ALPHA)
         swizzle[1] = PIPE_SWIZZLE_ONE;
      if (swizzle[2] == PIPE_SWIZZLE_ALPHA)
         swizzle[2] = PIPE_SWIZZLE_ONE;
      if (swizzle[3] == PIPE_SWIZZLE_ALPHA)
         swizzle[3] = PIPE_SWIZZLE_ONE;
   }

   if (tex_conv_table[view->format].flags & VIRGL_BIND_NEED_SWIZZLE) {
      if (swizzle[0] <= PIPE_SWIZZLE_ALPHA)
         swizzle[0] = tex_conv_table[view->format].swizzle[swizzle[0]];
      if (swizzle[1] <= PIPE_SWIZZLE_ALPHA)
         swizzle[1] = tex_conv_table[view->format].swizzle[swizzle[1]];
      if (swizzle[2] <= PIPE_SWIZZLE_ALPHA)
         swizzle[2] = tex_conv_table[view->format].swizzle[swizzle[2]];
      if (swizzle[3] <= PIPE_SWIZZLE_ALPHA)
         swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
   }

   view->gl_swizzle_r = to_gl_swizzle(swizzle[0]);
   view->gl_swizzle_g = to_gl_swizzle(swizzle[1]);
   view->gl_swizzle_b = to_gl_swizzle(swizzle[2]);
   view->gl_swizzle_a = to_gl_swizzle(swizzle[3]);

   view->cur_swizzle_r = view->cur_swizzle_g =
      view->cur_swizzle_b = view->cur_swizzle_a = -1;

   ret_handle = vrend_renderer_object_insert(ctx, view, sizeof(*view), handle, VIRGL_OBJECT_SAMPLER_VIEW);
   if (ret_handle == 0) {
      FREE(view);
      return ENOMEM;
   }
   return 0;
}

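/* Attach a texture (by explicit GL id) to the currently bound FBO.
 * Depth/stencil formats pick the matching attachment point; a layer of
 * 0xffffffff means "attach all layers" (layered attachment via
 * glFramebufferTexture), anything else selects a single layer/face.
 */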
static void vrend_fb_bind_texture_id(struct vrend_resource *res,
                                     int id,
                                     int idx,
                                     uint32_t level, uint32_t layer)
{
   const struct util_format_description *desc = util_format_description(res->base.format);
   GLenum attachment = GL_COLOR_ATTACHMENT0_EXT + idx;

   if (vrend_format_is_ds(res->base.format)) {
      if (util_format_has_stencil(desc)) {
         if (util_format_has_depth(desc))
            attachment = GL_DEPTH_STENCIL_ATTACHMENT;
         else
            attachment = GL_STENCIL_ATTACHMENT;
      } else
         attachment = GL_DEPTH_ATTACHMENT;
   }

   switch (res->target) {
   case GL_TEXTURE_1D_ARRAY:
   case GL_TEXTURE_2D_ARRAY:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      if (layer == 0xffffffff)
         glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
                              id, level);
      else
         glFramebufferTextureLayer(GL_FRAMEBUFFER_EXT, attachment,
                                   id, level, layer);
      break;
   case GL_TEXTURE_3D:
      if (layer == 0xffffffff)
         glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
                              id, level);
      else if (vrend_state.use_gles)
         glFramebufferTexture3DOES(GL_FRAMEBUFFER_EXT, attachment,
                                   res->target, id, level, layer);
      else
         glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, attachment,
                                   res->target, id, level, layer);
      break;
   case GL_TEXTURE_CUBE_MAP:
      if (layer == 0xffffffff)
         glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
                              id, level);
      else
         glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
                                   GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer, id, level);
      break;
   case GL_TEXTURE_1D:
      glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, attachment,
                                res->target, id, level);
      break;
   case GL_TEXTURE_2D:
   default:
      glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
                                res->target, id, level);
      break;
   }

   if (attachment == GL_DEPTH_ATTACHMENT) {
      switch (res->target) {
      case GL_TEXTURE_1D:
         glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
                                   GL_TEXTURE_1D, 0, 0);
         break;
      case GL_TEXTURE_2D:
      default:
         glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
                                   GL_TEXTURE_2D, 0, 0);
         break;
      }
   }
}

void vrend_fb_bind_texture(struct vrend_resource *res,
                           int idx,
                           uint32_t level, uint32_t layer)
{
   vrend_fb_bind_texture_id(res, res->id, idx, level, layer);
}

static void vrend_hw_set_zsurf_texture(struct vrend_context *ctx)
{
   struct vrend_surface *surf = ctx->sub->zsurf;

   if (!surf) {
      glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
                                GL_TEXTURE_2D, 0, 0);
   } else {
      uint32_t first_layer = surf->val1 & 0xffff;
      uint32_t last_layer = (surf->val1 >> 16) & 0xffff;

      if (!surf->texture)
         return;

      vrend_fb_bind_texture_id(surf->texture, surf->id, 0, surf->val0,
                               first_layer != last_layer ? 0xffffffff : first_layer);
   }
}

static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
{
   struct vrend_surface *surf = ctx->sub->surf[index];

   if (!surf) {
      GLenum attachment = GL_COLOR_ATTACHMENT0 + index;

      glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
                                GL_TEXTURE_2D, 0, 0);
   } else {
      uint32_t first_layer = ctx->sub->surf[index]->val1 & 0xffff;
      uint32_t last_layer = (ctx->sub->surf[index]->val1 >> 16) & 0xffff;

      vrend_fb_bind_texture_id(surf->texture, surf->id, index, surf->val0,
                               first_layer != last_layer ? 0xffffffff : first_layer);
   }
}

static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
{
   static const GLenum buffers[8] = {
      GL_COLOR_ATTACHMENT0_EXT,
      GL_COLOR_ATTACHMENT1_EXT,
      GL_COLOR_ATTACHMENT2_EXT,
      GL_COLOR_ATTACHMENT3_EXT,
      GL_COLOR_ATTACHMENT4_EXT,
      GL_COLOR_ATTACHMENT5_EXT,
      GL_COLOR_ATTACHMENT6_EXT,
      GL_COLOR_ATTACHMENT7_EXT,
   };
   glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);

   if (ctx->sub->nr_cbufs == 0) {
      glReadBuffer(GL_NONE);
      if (!vrend_state.use_gles) {
         glDisable(GL_FRAMEBUFFER_SRGB_EXT);
      }
   } else if (!vrend_state.use_gles) {
      /* Do not enter this path on GLES as this is not needed. */
      struct vrend_surface *surf = NULL;
      bool use_srgb = false;
      int i;
      for (i = 0; i < ctx->sub->nr_cbufs; i++) {
         if (ctx->sub->surf[i]) {
            surf = ctx->sub->surf[i];
            if (util_format_is_srgb(surf->format)) {
               use_srgb = true;
            }
         }
      }
      if (use_srgb) {
         glEnable(GL_FRAMEBUFFER_SRGB_EXT);
      } else {
         glDisable(GL_FRAMEBUFFER_SRGB_EXT);
      }
   }
   glDrawBuffers(ctx->sub->nr_cbufs, buffers);
}

void vrend_set_framebuffer_state(struct vrend_context *ctx,
                                 uint32_t nr_cbufs, uint32_t surf_handle[PIPE_MAX_COLOR_BUFS],
                                 uint32_t zsurf_handle)
{
   struct vrend_surface *surf, *zsurf;
   int i;
   int old_num;
   GLenum status;
   GLint new_height = -1;
   bool new_ibf = false;

   glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);

   if (zsurf_handle) {
      zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
      if (!zsurf) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
         return;
      }
   } else
      zsurf = NULL;

   if (ctx->sub->zsurf != zsurf) {
      vrend_surface_reference(&ctx->sub->zsurf, zsurf);
      vrend_hw_set_zsurf_texture(ctx);
   }

   old_num = ctx->sub->nr_cbufs;
   ctx->sub->nr_cbufs = nr_cbufs;
   ctx->sub->old_nr_cbufs = old_num;

   for (i = 0; i < (int)nr_cbufs; i++) {
      if (surf_handle[i] != 0) {
         surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
         if (!surf) {
            report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
            return;
         }
      } else
         surf = NULL;

      if (ctx->sub->surf[i] != surf) {
         vrend_surface_reference(&ctx->sub->surf[i], surf);
         vrend_hw_set_color_surface(ctx, i);
      }
   }

   if (old_num > ctx->sub->nr_cbufs) {
      for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
         vrend_surface_reference(&ctx->sub->surf[i], NULL);
         vrend_hw_set_color_surface(ctx, i);
      }
   }

   /* find a buffer to set fb_height from */
   if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
      new_height = 0;
      new_ibf = false;
   } else if (ctx->sub->nr_cbufs == 0) {
      new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
      new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
   } else {
      surf = NULL;
      for (i = 0; i < ctx->sub->nr_cbufs; i++) {
         if (ctx->sub->surf[i]) {
            surf = ctx->sub->surf[i];
            break;
         }
      }
      if (surf == NULL) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
         return;
      }
      new_height = u_minify(surf->texture->base.height0, surf->val0);
      new_ibf = surf->texture->y_0_top ? true : false;
   }

   if (new_height != -1) {
      if (ctx->sub->fb_height != (uint32_t)new_height || ctx->sub->inverted_fbo_content != new_ibf) {
         ctx->sub->fb_height = new_height;
         ctx->sub->inverted_fbo_content = new_ibf;
         ctx->sub->scissor_state_dirty = (1 << 0);
         ctx->sub->viewport_state_dirty = (1 << 0);
      }
   }

   vrend_hw_emit_framebuffer_state(ctx);

   if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
      status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
      if (status != GL_FRAMEBUFFER_COMPLETE)
         fprintf(stderr, "failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
   }
   ctx->sub->shader_dirty = true;
}

void vrend_set_framebuffer_state_no_attach(UNUSED struct vrend_context *ctx,
                                           uint32_t width, uint32_t height,
                                           uint32_t layers, uint32_t samples)
{
   if (has_feature(feat_fb_no_attach)) {
      glFramebufferParameteri(GL_FRAMEBUFFER,
                              GL_FRAMEBUFFER_DEFAULT_WIDTH, width);
      glFramebufferParameteri(GL_FRAMEBUFFER,
                              GL_FRAMEBUFFER_DEFAULT_HEIGHT, height);
      glFramebufferParameteri(GL_FRAMEBUFFER,
                              GL_FRAMEBUFFER_DEFAULT_LAYERS, layers);
      glFramebufferParameteri(GL_FRAMEBUFFER,
                              GL_FRAMEBUFFER_DEFAULT_SAMPLES, samples);
   }
}

/*
 * If the viewport Y scale factor is > 0 we are already rendering into
 * an FBO, so there should be no need to invert the rendering.
 */
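/* Gallium gives viewports as scale/translate pairs; glViewport wants
 * x/y/width/height. The conversion below inverts the usual state-tracker
 * encoding:
 *    width  = 2 * scale[0]          x = translate[0] - scale[0]
 *    height = 2 * |scale[1]|        y = translate[1] - scale[1]
 * e.g. scale = (w/2, h/2), translate = (x + w/2, y + h/2) recovers
 * glViewport(x, y, w, h). Similarly near/far come from
 * translate[2] - scale[2] and translate[2] + scale[2].
 */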
void vrend_set_viewport_states(struct vrend_context *ctx,
                               uint32_t start_slot,
                               uint32_t num_viewports,
                               const struct pipe_viewport_state *state)
{
   /* convert back to glViewport */
   GLint x, y;
   GLsizei width, height;
   GLclampd near_val, far_val;
   bool viewport_is_negative = (state[0].scale[1] < 0) ? true : false;
   uint i, idx;

   if (num_viewports > PIPE_MAX_VIEWPORTS ||
       start_slot > (PIPE_MAX_VIEWPORTS - num_viewports)) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_viewports);
      return;
   }

   for (i = 0; i < num_viewports; i++) {
      GLfloat abs_s1 = fabsf(state[i].scale[1]);

      idx = start_slot + i;
      width = state[i].scale[0] * 2.0f;
      height = abs_s1 * 2.0f;
      x = state[i].translate[0] - state[i].scale[0];
      y = state[i].translate[1] - state[i].scale[1];

      near_val = state[i].translate[2] - state[i].scale[2];
      far_val = near_val + (state[i].scale[2] * 2.0);

      if (ctx->sub->vps[idx].cur_x != x ||
          ctx->sub->vps[idx].cur_y != y ||
          ctx->sub->vps[idx].width != width ||
          ctx->sub->vps[idx].height != height) {
         ctx->sub->viewport_state_dirty |= (1 << idx);
         ctx->sub->vps[idx].cur_x = x;
         ctx->sub->vps[idx].cur_y = y;
         ctx->sub->vps[idx].width = width;
         ctx->sub->vps[idx].height = height;
      }

      if (idx == 0) {
         ctx->sub->viewport_is_negative = viewport_is_negative;
         ctx->sub->depth_scale = fabsf(far_val - near_val);
         ctx->sub->depth_transform = near_val;
      }

      if (ctx->sub->vps[idx].near_val != near_val ||
          ctx->sub->vps[idx].far_val != far_val) {
         ctx->sub->vps[idx].near_val = near_val;
         ctx->sub->vps[idx].far_val = far_val;

         if (vrend_state.use_gles) {
            if (near_val < 0.0f || far_val < 0.0f ||
                near_val > 1.0f || far_val > 1.0f || idx) {
               report_gles_warn(ctx, GLES_WARN_DEPTH_RANGE, 0);
            }

            /* Best effort despite the warning, gles will clamp. */
            glDepthRangef(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
         } else if (idx && has_feature(feat_viewport_array))
            glDepthRangeIndexed(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
         else
            glDepthRange(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
      }
   }
}

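/* Translate gallium vertex-element formats into the GL type/size/normalized
 * triple used by glVertexAttrib*Format. Most formats map directly via the
 * first channel's type and size; the 10:10:10:2 and 11:11:10 packed formats
 * need the special GL_*_2_10_10_10_REV / 10F_11F_11F_REV types, and BGRA
 * ordering is expressed by passing GL_BGRA as the component count.
 */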
int vrend_create_vertex_elements_state(struct vrend_context *ctx,
                                       uint32_t handle,
                                       unsigned num_elements,
                                       const struct pipe_vertex_element *elements)
{
   struct vrend_vertex_element_array *v;
   const struct util_format_description *desc;
   GLenum type;
   uint i;
   uint32_t ret_handle;

   if (num_elements > PIPE_MAX_ATTRIBS)
      return EINVAL;

   v = CALLOC_STRUCT(vrend_vertex_element_array);
   if (!v)
      return ENOMEM;

   v->count = num_elements;
   for (i = 0; i < num_elements; i++) {
      memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));

      desc = util_format_description(elements[i].src_format);
      if (!desc) {
         FREE(v);
         return EINVAL;
      }

      type = GL_FALSE;
      if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
         if (desc->channel[0].size == 32)
            type = GL_FLOAT;
         else if (desc->channel[0].size == 64)
            type = GL_DOUBLE;
         else if (desc->channel[0].size == 16)
            type = GL_HALF_FLOAT;
      } else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
                 desc->channel[0].size == 8)
         type = GL_UNSIGNED_BYTE;
      else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
               desc->channel[0].size == 8)
         type = GL_BYTE;
      else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
               desc->channel[0].size == 16)
         type = GL_UNSIGNED_SHORT;
      else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
               desc->channel[0].size == 16)
         type = GL_SHORT;
      else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
               desc->channel[0].size == 32)
         type = GL_UNSIGNED_INT;
      else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
               desc->channel[0].size == 32)
         type = GL_INT;
      else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
               elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
               elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
         type = GL_INT_2_10_10_10_REV;
      else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
               elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
               elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
         type = GL_UNSIGNED_INT_2_10_10_10_REV;
      else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
         type = GL_UNSIGNED_INT_10F_11F_11F_REV;

      if (type == GL_FALSE) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
         FREE(v);
         return EINVAL;
      }

      v->elements[i].type = type;
      if (desc->channel[0].normalized)
         v->elements[i].norm = GL_TRUE;
      if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
         v->elements[i].nr_chan = GL_BGRA;
      else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
         v->elements[i].nr_chan = 3;
      else
         v->elements[i].nr_chan = desc->nr_channels;
   }

   if (has_feature(feat_gles31_vertex_attrib_binding)) {
      glGenVertexArrays(1, &v->id);
      glBindVertexArray(v->id);
      for (i = 0; i < num_elements; i++) {
         struct vrend_vertex_element *ve = &v->elements[i];

         if (util_format_is_pure_integer(ve->base.src_format))
            glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset);
         else
            glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset);
         glVertexAttribBinding(i, ve->base.vertex_buffer_index);
         glVertexBindingDivisor(i, ve->base.instance_divisor);
         glEnableVertexAttribArray(i);
      }
   }
   ret_handle = vrend_renderer_object_insert(ctx, v, sizeof(struct vrend_vertex_element), handle,
                                             VIRGL_OBJECT_VERTEX_ELEMENTS);
   if (!ret_handle) {
      FREE(v);
      return ENOMEM;
   }
   return 0;
}

void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
                                      uint32_t handle)
{
   struct vrend_vertex_element_array *v;

   if (!handle) {
      ctx->sub->ve = NULL;
      return;
   }
   v = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   if (!v) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
      return;
   }

   if (ctx->sub->ve != v)
      ctx->sub->vbo_dirty = true;
   ctx->sub->ve = v;
}

void vrend_set_constants(struct vrend_context *ctx,
                         uint32_t shader,
                         UNUSED uint32_t index,
                         uint32_t num_constant,
                         float *data)
{
   struct vrend_constants *consts;
   void *new_consts;
   uint i;

   consts = &ctx->sub->consts[shader];
   ctx->sub->const_dirty[shader] = true;

   /* avoid losing the old buffer if realloc fails */
   new_consts = realloc(consts->consts, num_constant * sizeof(float));
   if (!new_consts)
      return;
   consts->consts = new_consts;

   consts->num_consts = num_constant;
   for (i = 0; i < num_constant; i++)
      consts->consts[i] = ((unsigned int *)data)[i];
}

void vrend_set_uniform_buffer(struct vrend_context *ctx,
                              uint32_t shader,
                              uint32_t index,
                              uint32_t offset,
                              uint32_t length,
                              uint32_t res_handle)
{
   struct vrend_resource *res;

   if (!has_feature(feat_ubo))
      return;

   if (res_handle) {
      res = vrend_renderer_ctx_res_lookup(ctx, res_handle);

      if (!res) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
         return;
      }
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, res);
      ctx->sub->cbs[shader][index].buffer_offset = offset;
      ctx->sub->cbs[shader][index].buffer_size = length;

      ctx->sub->const_bufs_used_mask[shader] |= (1 << index);
   } else {
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, NULL);
      ctx->sub->cbs[shader][index].buffer_offset = 0;
      ctx->sub->cbs[shader][index].buffer_size = 0;
      ctx->sub->const_bufs_used_mask[shader] &= ~(1 << index);
   }
}

void vrend_set_index_buffer(struct vrend_context *ctx,
                            uint32_t res_handle,
                            uint32_t index_size,
                            uint32_t offset)
{
   struct vrend_resource *res;

   ctx->sub->ib.index_size = index_size;
   ctx->sub->ib.offset = offset;
   if (res_handle) {
      if (ctx->sub->index_buffer_res_id != res_handle) {
         res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
         if (!res) {
            vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
            ctx->sub->index_buffer_res_id = 0;
            report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
            return;
         }
         vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, res);
         ctx->sub->index_buffer_res_id = res_handle;
      }
   } else {
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
      ctx->sub->index_buffer_res_id = 0;
   }
}

void vrend_set_single_vbo(struct vrend_context *ctx,
                          int index,
                          uint32_t stride,
                          uint32_t buffer_offset,
                          uint32_t res_handle)
{
   struct vrend_resource *res;

   if (ctx->sub->vbo[index].stride != stride ||
       ctx->sub->vbo[index].buffer_offset != buffer_offset ||
       ctx->sub->vbo_res_ids[index] != res_handle)
      ctx->sub->vbo_dirty = true;

   ctx->sub->vbo[index].stride = stride;
   ctx->sub->vbo[index].buffer_offset = buffer_offset;

   if (res_handle == 0) {
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
      ctx->sub->vbo_res_ids[index] = 0;
   } else if (ctx->sub->vbo_res_ids[index] != res_handle) {
      res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
      if (!res) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
         ctx->sub->vbo_res_ids[index] = 0;
         return;
      }
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
      ctx->sub->vbo_res_ids[index] = res_handle;
   }
}

void vrend_set_num_vbo(struct vrend_context *ctx,
                       int num_vbo)
{
   int old_num = ctx->sub->num_vbos;
   int i;

   ctx->sub->num_vbos = num_vbo;
   ctx->sub->old_num_vbos = old_num;

   if (old_num != num_vbo)
      ctx->sub->vbo_dirty = true;

   for (i = num_vbo; i < old_num; i++) {
      vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
      ctx->sub->vbo_res_ids[i] = 0;
   }
}

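/* Bind one sampler view. Texture parameters (base/max level, swizzles,
 * sRGB decode) are cached in the view and only re-emitted when they
 * change, since calling glTexParameteri on every bind is needlessly
 * expensive. Buffer textures take the glTexBuffer(Range) path instead.
 */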
void vrend_set_single_sampler_view(struct vrend_context *ctx,
                                   uint32_t shader_type,
                                   uint32_t index,
                                   uint32_t handle)
{
   struct vrend_sampler_view *view = NULL;
   struct vrend_texture *tex;

   if (handle) {
      view = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SAMPLER_VIEW);
      if (!view) {
         ctx->sub->views[shader_type].views[index] = NULL;
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
         return;
      }
      if (ctx->sub->views[shader_type].views[index] == view) {
         return;
      }
      /* we should have a reference to this texture taken at create time */
      tex = (struct vrend_texture *)view->texture;
      if (!tex) {
         return;
      }
      if (!view->texture->is_buffer) {
         glBindTexture(view->target, view->id);

         if (util_format_is_depth_or_stencil(view->format)) {
            if (vrend_state.use_core_profile == false) {
               /* setting depth texture mode is deprecated in core profile */
               if (view->depth_texture_mode != GL_RED) {
                  glTexParameteri(view->texture->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
                  view->depth_texture_mode = GL_RED;
               }
            }
            if (has_feature(feat_stencil_texturing)) {
               const struct util_format_description *desc = util_format_description(view->format);
               if (!util_format_has_depth(desc)) {
                  glTexParameteri(view->texture->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX);
               } else {
                  glTexParameteri(view->texture->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_DEPTH_COMPONENT);
               }
            }
         }

         if (view->cur_base != (view->val1 & 0xff)) {
            view->cur_base = view->val1 & 0xff;
            glTexParameteri(view->texture->target, GL_TEXTURE_BASE_LEVEL, view->cur_base);
         }
         if (view->cur_max != ((view->val1 >> 8) & 0xff)) {
            view->cur_max = (view->val1 >> 8) & 0xff;
            glTexParameteri(view->texture->target, GL_TEXTURE_MAX_LEVEL, view->cur_max);
         }
         if (view->cur_swizzle_r != view->gl_swizzle_r) {
            glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
            view->cur_swizzle_r = view->gl_swizzle_r;
         }
         if (view->cur_swizzle_g != view->gl_swizzle_g) {
            glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
            view->cur_swizzle_g = view->gl_swizzle_g;
         }
         if (view->cur_swizzle_b != view->gl_swizzle_b) {
            glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
            view->cur_swizzle_b = view->gl_swizzle_b;
         }
         if (view->cur_swizzle_a != view->gl_swizzle_a) {
            glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
            view->cur_swizzle_a = view->gl_swizzle_a;
         }
         if (view->cur_srgb_decode != view->srgb_decode && util_format_is_srgb(view->format)) {
            if (has_feature(feat_samplers))
               ctx->sub->sampler_state_dirty = true;
            else if (has_feature(feat_texture_srgb_decode)) {
               glTexParameteri(view->texture->target, GL_TEXTURE_SRGB_DECODE_EXT,
                               view->srgb_decode);
               view->cur_srgb_decode = view->srgb_decode;
            }
         }
      } else {
         GLenum internalformat;

         if (!view->texture->tbo_tex_id)
            glGenTextures(1, &view->texture->tbo_tex_id);

         glBindTexture(GL_TEXTURE_BUFFER, view->texture->tbo_tex_id);
         internalformat = tex_conv_table[view->format].internalformat;
         if (has_feature(feat_texture_buffer_range)) {
            unsigned offset = view->val0;
            unsigned size = view->val1 - view->val0 + 1;
            int blsize = util_format_get_blocksize(view->format);

            offset *= blsize;
            size *= blsize;
            glTexBufferRange(GL_TEXTURE_BUFFER, internalformat, view->texture->id, offset, size);
         } else
            glTexBuffer(GL_TEXTURE_BUFFER, internalformat, view->texture->id);
      }
   }

   vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[index], view);
}

void vrend_set_num_sampler_views(struct vrend_context *ctx,
                                 uint32_t shader_type,
                                 uint32_t start_slot,
                                 int num_sampler_views)
{
   int last_slot = start_slot + num_sampler_views;
   int i;

   for (i = last_slot; i < ctx->sub->views[shader_type].num_views; i++)
      vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[i], NULL);

   ctx->sub->views[shader_type].num_views = last_slot;
}

void vrend_set_single_image_view(struct vrend_context *ctx,
                                 uint32_t shader_type,
                                 int index,
                                 uint32_t format, uint32_t access,
                                 uint32_t layer_offset, uint32_t level_size,
                                 uint32_t handle)
{
   struct vrend_image_view *iview = &ctx->sub->image_views[shader_type][index];
   struct vrend_resource *res;

   if (!has_feature(feat_images))
      return;

   if (handle) {
      res = vrend_renderer_ctx_res_lookup(ctx, handle);
      if (!res) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle);
         return;
      }
      iview->texture = res;
      iview->format = tex_conv_table[format].internalformat;
      iview->access = access;
      iview->u.buf.offset = layer_offset;
      iview->u.buf.size = level_size;
      ctx->sub->images_used_mask[shader_type] |= (1 << index);
   } else {
      iview->texture = NULL;
      iview->format = 0;
      ctx->sub->images_used_mask[shader_type] &= ~(1 << index);
   }
}

void vrend_set_single_ssbo(struct vrend_context *ctx,
                           uint32_t shader_type,
                           int index,
                           uint32_t offset, uint32_t length,
                           uint32_t handle)
{
   struct vrend_ssbo *ssbo = &ctx->sub->ssbo[shader_type][index];
   struct vrend_resource *res;

   if (!has_feature(feat_ssbo))
      return;

   if (handle) {
      res = vrend_renderer_ctx_res_lookup(ctx, handle);
      if (!res) {
         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle);
         return;
      }
      ssbo->res = res;
      ssbo->buffer_offset = offset;
      ssbo->buffer_size = length;
      ctx->sub->ssbo_used_mask[shader_type] |= (1 << index);
   } else {
      ssbo->res = NULL;
      ssbo->buffer_offset = 0;
      ssbo->buffer_size = 0;
      ctx->sub->ssbo_used_mask[shader_type] &= ~(1 << index);
   }
}

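/* Translate gallium PIPE_BARRIER_* flags into the GL_*_BARRIER_BIT mask
 * for glMemoryBarrier(). PIPE_BARRIER_ALL short-circuits to
 * GL_ALL_BARRIER_BITS.
 */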
void vrend_memory_barrier(UNUSED struct vrend_context *ctx,
                          unsigned flags)
{
   GLbitfield gl_barrier = 0;

   if (!has_feature(feat_barrier))
      return;

   if ((flags & PIPE_BARRIER_ALL) == PIPE_BARRIER_ALL)
      gl_barrier = GL_ALL_BARRIER_BITS;
   else {
      if (flags & PIPE_BARRIER_VERTEX_BUFFER)
         gl_barrier |= GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT;
      if (flags & PIPE_BARRIER_INDEX_BUFFER)
         gl_barrier |= GL_ELEMENT_ARRAY_BARRIER_BIT;
      if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
         gl_barrier |= GL_UNIFORM_BARRIER_BIT;
      if (flags & PIPE_BARRIER_TEXTURE)
         gl_barrier |= GL_TEXTURE_FETCH_BARRIER_BIT | GL_PIXEL_BUFFER_BARRIER_BIT;
      if (flags & PIPE_BARRIER_IMAGE)
         gl_barrier |= GL_SHADER_IMAGE_ACCESS_BARRIER_BIT;
      if (flags & PIPE_BARRIER_INDIRECT_BUFFER)
         gl_barrier |= GL_COMMAND_BARRIER_BIT;
      if (flags & PIPE_BARRIER_MAPPED_BUFFER)
         gl_barrier |= GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT;
      if (flags & PIPE_BARRIER_FRAMEBUFFER)
         gl_barrier |= GL_FRAMEBUFFER_BARRIER_BIT;
      if (flags & PIPE_BARRIER_STREAMOUT_BUFFER)
         gl_barrier |= GL_TRANSFORM_FEEDBACK_BARRIER_BIT;
      if (flags & PIPE_BARRIER_SHADER_BUFFER) {
         gl_barrier |= GL_ATOMIC_COUNTER_BARRIER_BIT;
         if (has_feature(feat_ssbo_barrier))
            gl_barrier |= GL_SHADER_STORAGE_BARRIER_BIT;
      }
   }
   glMemoryBarrier(gl_barrier);
}

void vrend_texture_barrier(UNUSED struct vrend_context *ctx,
                           unsigned flags)
{
   if (!has_feature(feat_texture_barrier))
      return;

   if (flags == PIPE_TEXTURE_BARRIER_SAMPLER)
      glTextureBarrier();
}

static void vrend_destroy_shader_object(void *obj_ptr)
{
   struct vrend_shader_selector *state = obj_ptr;

   vrend_shader_state_reference(&state, NULL);
}

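/* Build the shader key used to select (or compile) a shader variant.
 * The key captures all cross-stage and fixed-function state that affects
 * the generated GLSL: emulated alpha test and polygon stipple on core
 * profile, which stages are present, and the indirect IO counts of the
 * neighbouring stages.
 */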
static inline void vrend_fill_shader_key(struct vrend_context *ctx,
                                         unsigned type,
                                         struct vrend_shader_key *key)
{
   if (vrend_state.use_core_profile == true) {
      int i;
      bool add_alpha_test = true;
      key->cbufs_are_a8_bitmask = 0;
      for (i = 0; i < ctx->sub->nr_cbufs; i++) {
         if (!ctx->sub->surf[i])
            continue;
         if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
            key->cbufs_are_a8_bitmask |= (1 << i);
         if (util_format_is_pure_integer(ctx->sub->surf[i]->format))
            add_alpha_test = false;
      }
      if (add_alpha_test) {
         key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
         key->alpha_test = ctx->sub->dsa_state.alpha.func;
         key->alpha_ref_val = ctx->sub->dsa_state.alpha.ref_value;
      }

      key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
      key->color_two_side = ctx->sub->rs_state.light_twoside;

      key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
      key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
   } else {
      key->add_alpha_test = 0;
      key->pstipple_tex = 0;
   }
   key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
   key->coord_replace = ctx->sub->rs_state.point_quad_rasterization ? ctx->sub->rs_state.sprite_coord_enable : 0;

   if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
      key->gs_present = true;
   if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
      key->tcs_present = true;
   if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
      key->tes_present = true;

   int prev_type = -1;

   switch (type) {
   case PIPE_SHADER_GEOMETRY:
      if (key->tcs_present || key->tes_present)
         prev_type = PIPE_SHADER_TESS_EVAL;
      else
         prev_type = PIPE_SHADER_VERTEX;
      break;
   case PIPE_SHADER_FRAGMENT:
      if (key->gs_present)
         prev_type = PIPE_SHADER_GEOMETRY;
      else if (key->tcs_present || key->tes_present)
         prev_type = PIPE_SHADER_TESS_EVAL;
      else
         prev_type = PIPE_SHADER_VERTEX;
      break;
   case PIPE_SHADER_TESS_EVAL:
      prev_type = PIPE_SHADER_TESS_CTRL;
      break;
   case PIPE_SHADER_TESS_CTRL:
      prev_type = PIPE_SHADER_VERTEX;
      break;
   default:
      break;
   }
   if (prev_type != -1 && ctx->sub->shaders[prev_type]) {
      key->prev_stage_pervertex_out = ctx->sub->shaders[prev_type]->sinfo.has_pervertex_out;
      key->prev_stage_num_clip_out = ctx->sub->shaders[prev_type]->sinfo.num_clip_out;
      key->prev_stage_num_cull_out = ctx->sub->shaders[prev_type]->sinfo.num_cull_out;
      key->num_indirect_generic_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
      key->num_indirect_patch_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
   }

   int next_type = -1;
   switch (type) {
   case PIPE_SHADER_VERTEX:
      if (key->tcs_present)
         next_type = PIPE_SHADER_TESS_CTRL;
      else if (key->gs_present)
         next_type = PIPE_SHADER_GEOMETRY;
      else
         next_type = PIPE_SHADER_FRAGMENT;
      break;
   case PIPE_SHADER_TESS_CTRL:
      next_type = PIPE_SHADER_TESS_EVAL;
      break;
   case PIPE_SHADER_GEOMETRY:
      next_type = PIPE_SHADER_FRAGMENT;
      break;
   case PIPE_SHADER_TESS_EVAL:
      if (key->gs_present)
         next_type = PIPE_SHADER_GEOMETRY;
      else
         next_type = PIPE_SHADER_FRAGMENT;
      break;
   default:
      break;
   }

   if (next_type != -1 && ctx->sub->shaders[next_type]) {
      key->num_indirect_generic_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_generic_inputs;
      key->num_indirect_patch_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_patch_inputs;
   }
}

static inline int conv_shader_type(int type)
{
   switch (type) {
   case PIPE_SHADER_VERTEX: return GL_VERTEX_SHADER;
   case PIPE_SHADER_FRAGMENT: return GL_FRAGMENT_SHADER;
   case PIPE_SHADER_GEOMETRY: return GL_GEOMETRY_SHADER;
   case PIPE_SHADER_TESS_CTRL: return GL_TESS_CONTROL_SHADER;
   case PIPE_SHADER_TESS_EVAL: return GL_TESS_EVALUATION_SHADER;
   case PIPE_SHADER_COMPUTE: return GL_COMPUTE_SHADER;
   default:
      return 0;
   }
}

static int vrend_shader_create(struct vrend_context *ctx,
                               struct vrend_shader *shader,
                               struct vrend_shader_key key)
{
   if (!shader->sel->tokens) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
      return -1;
   }

   shader->id = glCreateShader(conv_shader_type(shader->sel->type));
   shader->compiled_fs_id = 0;
   shader->glsl_prog = vrend_convert_shader(&ctx->shader_cfg, shader->sel->tokens, shader->sel->req_local_mem, &key, &shader->sel->sinfo);
   if (!shader->glsl_prog) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
      glDeleteShader(shader->id);
      return -1;
   }
   shader->key = key;

   if (!vrend_compile_shader(ctx, shader)) {
      glDeleteShader(shader->id);
      free(shader->glsl_prog);
      return -1;
   }
   return 0;
}

static int vrend_shader_select(struct vrend_context *ctx,
                               struct vrend_shader_selector *sel,
                               bool *dirty)
{
   struct vrend_shader_key key;
   struct vrend_shader *shader = NULL;
   int r;

   memset(&key, 0, sizeof(key));
   vrend_fill_shader_key(ctx, sel->type, &key);

   if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key)))
      return 0;

   if (sel->num_shaders > 1) {
      struct vrend_shader *p = sel->current, *c = p->next_variant;
      while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
         p = c;
         c = c->next_variant;
      }
      if (c) {
         p->next_variant = c->next_variant;
         shader = c;
      }
   }

   if (!shader) {
      shader = CALLOC_STRUCT(vrend_shader);
      if (!shader)
         return ENOMEM;
      shader->sel = sel;
      list_inithead(&shader->programs);

      r = vrend_shader_create(ctx, shader, key);
      if (r) {
         sel->current = NULL;
         FREE(shader);
         return r;
      }
      sel->num_shaders++;
   }
   if (dirty)
      *dirty = true;

   shader->next_variant = sel->current;
   sel->current = shader;
   return 0;
}

static void *vrend_create_shader_state(UNUSED struct vrend_context *ctx,
                                       const struct pipe_stream_output_info *so_info,
                                       uint32_t req_local_mem,
                                       unsigned pipe_shader_type)
{
   struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector);

   if (!sel)
      return NULL;

   sel->req_local_mem = req_local_mem;
   sel->type = pipe_shader_type;
   sel->sinfo.so_info = *so_info;
   pipe_reference_init(&sel->reference, 1);

   return sel;
}

static int vrend_finish_shader(struct vrend_context *ctx,
                               struct vrend_shader_selector *sel,
                               const struct tgsi_token *tokens)
{
   int r;

   sel->tokens = tgsi_dup_tokens(tokens);
   if (!sel->tokens)
      return ENOMEM;

   r = vrend_shader_select(ctx, sel, NULL);
   if (r) {
      return EINVAL;
   }
   return 0;
}

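/* Shaders larger than one command buffer arrive split across several
 * VIRGL_OBJECT_SHADER commands: the first command allocates a buffer of
 * offlen bytes, and continuations set VIRGL_OBJ_SHADER_OFFSET_CONT and
 * carry the expected write offset in the remaining offlen bits so that
 * out-of-order or oversized chunks can be rejected.
 */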
int vrend_create_shader(struct vrend_context *ctx,
                        uint32_t handle,
                        const struct pipe_stream_output_info *so_info,
                        uint32_t req_local_mem,
                        const char *shd_text, uint32_t offlen, uint32_t num_tokens,
                        uint32_t type, uint32_t pkt_length)
{
   struct vrend_shader_selector *sel = NULL;
   int ret_handle;
   bool new_shader = true, long_shader = false;
   bool finished = false;
   int ret;

   if (type > PIPE_SHADER_COMPUTE)
      return EINVAL;

   if (!has_feature(feat_geometry_shader) &&
       type == PIPE_SHADER_GEOMETRY)
      return EINVAL;

   if (!has_feature(feat_tessellation) &&
       (type == PIPE_SHADER_TESS_CTRL ||
        type == PIPE_SHADER_TESS_EVAL))
      return EINVAL;

   if (!has_feature(feat_compute_shader) &&
       type == PIPE_SHADER_COMPUTE)
      return EINVAL;

   if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT)
      new_shader = false;
   else if (((offlen + 3) / 4) > pkt_length)
      long_shader = true;

   /* If a long shader is in progress, only accept continuations for the
      same shader type and handle. */
   if (ctx->sub->long_shader_in_progress_handle[type]) {
      if (new_shader == true)
         return EINVAL;
      if (handle != ctx->sub->long_shader_in_progress_handle[type])
         return EINVAL;
   }

   if (new_shader) {
      sel = vrend_create_shader_state(ctx, so_info, req_local_mem, type);
      if (sel == NULL)
         return ENOMEM;

      if (long_shader) {
         sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */
         sel->tmp_buf = malloc(sel->buf_len);
         if (!sel->tmp_buf) {
            ret = ENOMEM;
            goto error;
         }
         memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
         sel->buf_offset = pkt_length * 4;
         ctx->sub->long_shader_in_progress_handle[type] = handle;
      } else
         finished = true;
   } else {
      sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
      if (!sel) {
         fprintf(stderr, "got continuation without original shader %d\n", handle);
         ret = EINVAL;
         goto error;
      }

      offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT;
      if (offlen != sel->buf_offset) {
         fprintf(stderr, "got mismatched shader continuation %d vs %d\n",
                 offlen, sel->buf_offset);
         ret = EINVAL;
         goto error;
      }

      /* make sure the continuation cannot overflow the buffer */
      if (pkt_length * 4 < pkt_length ||
          pkt_length * 4 + sel->buf_offset < pkt_length * 4 ||
          pkt_length * 4 + sel->buf_offset < sel->buf_offset) {
         ret = EINVAL;
         goto error;
      }

      if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
         fprintf(stderr, "got too large shader continuation %d vs %d\n",
                 pkt_length * 4 + sel->buf_offset, sel->buf_len);
         ret = EINVAL;
         goto error;
      }

      memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4);

      sel->buf_offset += pkt_length * 4;
      if (sel->buf_offset >= sel->buf_len) {
         finished = true;
         shd_text = sel->tmp_buf;
      }
   }

   if (finished) {
      struct tgsi_token *tokens;

      tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token));
      if (!tokens) {
         ret = ENOMEM;
         goto error;
      }

      if (vrend_dump_shaders)
         fprintf(stderr, "shader\n%s\n", shd_text);
      if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) {
         free(tokens);
         ret = EINVAL;
         goto error;
      }

      if (vrend_finish_shader(ctx, sel, tokens)) {
         free(tokens);
         ret = EINVAL;
         goto error;
      } else {
         free(sel->tmp_buf);
         sel->tmp_buf = NULL;
      }
      free(tokens);
      ctx->sub->long_shader_in_progress_handle[type] = 0;
   }

   if (new_shader) {
      ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER);
      if (ret_handle == 0) {
         ret = ENOMEM;
         goto error;
      }
   }

   return 0;

error:
   if (new_shader)
      vrend_destroy_shader_selector(sel);
   else
      vrend_renderer_object_destroy(ctx, handle);

   return ret;
}

void vrend_bind_shader(struct vrend_context *ctx,
                       uint32_t handle, uint32_t type)
{
   struct vrend_shader_selector *sel;

   if (type > PIPE_SHADER_COMPUTE)
      return;

   if (handle == 0) {
      if (type == PIPE_SHADER_COMPUTE)
         ctx->sub->cs_shader_dirty = true;
      else
         ctx->sub->shader_dirty = true;
      vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
      return;
   }

   sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
   if (!sel)
      return;

   if (sel->type != type)
      return;

   if (ctx->sub->shaders[sel->type] != sel) {
      if (type == PIPE_SHADER_COMPUTE)
         ctx->sub->cs_shader_dirty = true;
      else
         ctx->sub->shader_dirty = true;
      ctx->sub->prog_ids[sel->type] = 0;
   }

   vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
}

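/* Implements the gallium clear() callback on the host. Note that
 * buffers >> 2 maps the per-colorbuffer PIPE_CLEAR_COLOR0.. bits onto a
 * cbuf index mask (in gallium's encoding, bits 0 and 1 hold depth and
 * stencil), which is how the code below decides between a plain glClear
 * and per-buffer glClearBuffer* calls.
 */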
void vrend_clear(struct vrend_context *ctx,
                 unsigned buffers,
                 const union pipe_color_union *color,
                 double depth, unsigned stencil)
{
   GLbitfield bits = 0;

   if (ctx->in_error)
      return;

   if (ctx->ctx_switch_pending)
      vrend_finish_context_switch(ctx);

   glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);

   vrend_update_frontface_state(ctx);
   if (ctx->sub->stencil_state_dirty)
      vrend_update_stencil_state(ctx);
   if (ctx->sub->scissor_state_dirty)
      vrend_update_scissor_state(ctx);
   if (ctx->sub->viewport_state_dirty)
      vrend_update_viewport_state(ctx);

   vrend_use_program(ctx, 0);

   if (buffers & PIPE_CLEAR_COLOR) {
      if (ctx->sub->nr_cbufs && ctx->sub->surf[0] && vrend_format_is_emulated_alpha(ctx->sub->surf[0]->format)) {
         glClearColor(color->f[3], 0.0, 0.0, 0.0);
      } else {
         glClearColor(color->f[0], color->f[1], color->f[2], color->f[3]);
      }

      /* This function implements Gallium's full clear callback (st->pipe->clear) on the host. This
         callback requires no color component be masked. We must unmask all components before
         calling glClear* and restore the previous colormask afterwards, as Gallium expects. */
      if (ctx->sub->hw_blend_state.independent_blend_enable &&
          has_feature(feat_indep_blend)) {
         int i;
         for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
            glColorMaskIndexedEXT(i, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
      } else
         glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
   }

   if (buffers & PIPE_CLEAR_DEPTH) {
      /* gallium clears don't respect depth mask */
      glDepthMask(GL_TRUE);
      if (vrend_state.use_gles) {
         if (depth < 0.0f || depth > 1.0f) {
            // Only warn, it is clamped by the function.
            report_gles_warn(ctx, GLES_WARN_DEPTH_CLEAR, 0);
         }
         glClearDepthf(depth);
      } else {
         glClearDepth(depth);
      }
   }

   if (buffers & PIPE_CLEAR_STENCIL) {
      glStencilMask(~0u);
      glClearStencil(stencil);
   }

   if (ctx->sub->hw_rs_state.rasterizer_discard)
      glDisable(GL_RASTERIZER_DISCARD);

   if (buffers & PIPE_CLEAR_COLOR) {
      uint32_t mask = 0;
      int i;
      for (i = 0; i < ctx->sub->nr_cbufs; i++) {
         if (ctx->sub->surf[i])
            mask |= (1 << i);
      }
      if (mask != (buffers >> 2)) {
         mask = buffers >> 2;
         while (mask) {
            i = u_bit_scan(&mask);
            if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_uint(ctx->sub->surf[i]->format))
               glClearBufferuiv(GL_COLOR,
                                i, (GLuint *)color);
            else if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_sint(ctx->sub->surf[i]->format))
               glClearBufferiv(GL_COLOR,
                               i, (GLint *)color);
            else
               glClearBufferfv(GL_COLOR,
                               i, (GLfloat *)color);
         }
      } else
         bits |= GL_COLOR_BUFFER_BIT;
   }
   if (buffers & PIPE_CLEAR_DEPTH)
      bits |= GL_DEPTH_BUFFER_BIT;
   if (buffers & PIPE_CLEAR_STENCIL)
      bits |= GL_STENCIL_BUFFER_BIT;

   if (bits)
      glClear(bits);

   /* Is it really necessary to restore the old states? The only reason we
    * get here is because the guest cleared all those states but gallium
    * didn't forward them before calling the clear command
    */
   if (ctx->sub->hw_rs_state.rasterizer_discard)
      glEnable(GL_RASTERIZER_DISCARD);

   if (buffers & PIPE_CLEAR_DEPTH) {
      if (!ctx->sub->dsa_state.depth.writemask)
         glDepthMask(GL_FALSE);
   }

   /* Restore previous stencil buffer write masks for both front and back faces */
   if (buffers & PIPE_CLEAR_STENCIL) {
      glStencilMaskSeparate(GL_FRONT, ctx->sub->dsa_state.stencil[0].writemask);
      glStencilMaskSeparate(GL_BACK, ctx->sub->dsa_state.stencil[1].writemask);
   }

   /* Restore previous colormask */
   if (buffers & PIPE_CLEAR_COLOR) {
      if (ctx->sub->hw_blend_state.independent_blend_enable &&
          has_feature(feat_indep_blend)) {
         int i;
         for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
            struct pipe_blend_state *blend = &ctx->sub->hw_blend_state;
            glColorMaskIndexedEXT(i, blend->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
                                  blend->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
                                  blend->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
                                  blend->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
         }
      } else {
         glColorMask(ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
                     ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
                     ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
                     ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
      }
   }
}

static void vrend_update_scissor_state(struct vrend_context *ctx)
{
   struct pipe_scissor_state *ss;
   struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
   GLint y;
   GLuint idx;
   unsigned mask = ctx->sub->scissor_state_dirty;

   if (state->scissor)
      glEnable(GL_SCISSOR_TEST);
   else
      glDisable(GL_SCISSOR_TEST);

   while (mask) {
      idx = u_bit_scan(&mask);
      if (idx >= PIPE_MAX_VIEWPORTS) {
         vrend_report_buffer_error(ctx, 0);
         break;
      }
      ss = &ctx->sub->ss[idx];
      /* both FBO orientations currently use miny directly */
      y = ss->miny;

      if (idx > 0 && has_feature(feat_viewport_array))
         glScissorIndexed(idx, ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
      else
         glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
   }
   ctx->sub->scissor_state_dirty = 0;
}

3273 static void vrend_update_viewport_state(struct vrend_context *ctx)
3274 {
3275 GLint cy;
3276 unsigned mask = ctx->sub->viewport_state_dirty;
3277 int idx;
3278 while (mask) {
3279 idx = u_bit_scan(&mask);
3280
3281 if (ctx->sub->viewport_is_negative)
3282 cy = ctx->sub->vps[idx].cur_y - ctx->sub->vps[idx].height;
3283 else
3284 cy = ctx->sub->vps[idx].cur_y;
3285 if (idx > 0 && has_feature(feat_viewport_array))
3286 glViewportIndexedf(idx, ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
3287 else
3288 glViewport(ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
3289 }
3290
3291 ctx->sub->viewport_state_dirty = 0;
3292 }
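
/* Note on the y-flip above (a minimal reading of the state kept in
 * vps[]): GL viewports are anchored at their bottom-left corner, so
 * when the guest supplied an inverted viewport (viewport_is_negative),
 * cur_y tracks the opposite edge and
 *
 *    cy = cur_y - height;
 *
 * re-anchors the rectangle before it is handed to glViewport(), which
 * only accepts non-negative sizes.
 */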
3293
3294 static GLenum get_gs_xfb_mode(GLenum mode)
3295 {
3296 switch (mode) {
3297 case GL_POINTS:
3298 return GL_POINTS;
3299 case GL_LINE_STRIP:
3300 return GL_LINES;
3301 case GL_TRIANGLE_STRIP:
3302 return GL_TRIANGLES;
3303 default:
3304 fprintf(stderr, "illegal gs transform feedback mode %d\n", mode);
3305 return GL_POINTS;
3306 }
3307 }
3308
3309 static GLenum get_tess_xfb_mode(int mode, bool is_point_mode)
3310 {
3311 if (is_point_mode)
3312 return GL_POINTS;
3313 switch (mode) {
3314 case GL_QUADS:
3315 case GL_TRIANGLES:
3316 return GL_TRIANGLES;
3317 case GL_LINES:
3318 return GL_LINES;
3319 default:
3320 fprintf(stderr, "illegal gs transform feedback mode %d\n", mode);
3321 return GL_POINTS;
3322 }
3323 }
3324
3325 static GLenum get_xfb_mode(GLenum mode)
3326 {
3327 switch (mode) {
3328 case GL_POINTS:
3329 return GL_POINTS;
3330 case GL_TRIANGLES:
3331 case GL_TRIANGLE_STRIP:
3332 case GL_TRIANGLE_FAN:
3333 case GL_QUADS:
3334 case GL_QUAD_STRIP:
3335 case GL_POLYGON:
3336 return GL_TRIANGLES;
3337 case GL_LINES:
3338 case GL_LINE_LOOP:
3339 case GL_LINE_STRIP:
3340 return GL_LINES;
3341 default:
3342 fprintf(stderr, "failed to translate TFB %d\n", mode);
3343 return GL_POINTS;
3344 }
3345 }
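
/* Usage note for the three helpers above: glBeginTransformFeedback()
 * only accepts GL_POINTS, GL_LINES and GL_TRIANGLES as primitive modes,
 * so every draw mode or shader output primitive is collapsed onto one
 * of those three, e.g.
 *
 *    glBeginTransformFeedback(get_xfb_mode(GL_TRIANGLE_FAN));
 *
 * resolves to a GL_TRIANGLES feedback stream.
 */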
3346
3347 static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
3348 struct vrend_vertex_element_array *va)
3349 {
3350 uint32_t num_enable;
3351 uint32_t enable_bitmask;
3352 uint32_t disable_bitmask;
3353 int i;
3354
3355 num_enable = va->count;
3356 enable_bitmask = 0;
3357 disable_bitmask = ~((1ull << num_enable) - 1);
3358 for (i = 0; i < (int)va->count; i++) {
3359 struct vrend_vertex_element *ve = &va->elements[i];
3360 int vbo_index = ve->base.vertex_buffer_index;
3361 struct vrend_resource *res;
3362 GLint loc;
3363
3364 if (i >= ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs) {
3365 /* XYZZY: debug this? */
3366 num_enable = ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs;
3367 break;
3368 }
3369 res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
3370
3371 if (!res) {
3372 fprintf(stderr,"cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
3373 continue;
3374 }
3375
3376 if (vrend_state.use_explicit_locations || has_feature(feat_gles31_vertex_attrib_binding)) {
3377 loc = i;
3378 } else {
3379 if (ctx->sub->prog->attrib_locs) {
3380 loc = ctx->sub->prog->attrib_locs[i];
3381 } else loc = -1;
3382
3383 if (loc == -1) {
3384 fprintf(stderr,"%s: cannot find loc %d %d %d\n", ctx->debug_name, i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
3385 num_enable--;
3386 if (i == 0) {
3387 fprintf(stderr,"%s: shader probably didn't compile - skipping rendering\n", ctx->debug_name);
3388 return;
3389 }
3390 continue;
3391 }
3392 }
3393
3394 if (ve->type == GL_FALSE) {
3395 fprintf(stderr,"failed to translate vertex type - skipping render\n");
3396 return;
3397 }
3398
3399 glBindBuffer(GL_ARRAY_BUFFER, res->id);
3400
3401 if (ctx->sub->vbo[vbo_index].stride == 0) {
3402 void *data;
3403 /* for 0 stride we are kinda screwed */
3404 data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
3405
3406 switch (ve->nr_chan) {
3407 case 1:
3408 glVertexAttrib1fv(loc, data);
3409 break;
3410 case 2:
3411 glVertexAttrib2fv(loc, data);
3412 break;
3413 case 3:
3414 glVertexAttrib3fv(loc, data);
3415 break;
3416 case 4:
3417 default:
3418 glVertexAttrib4fv(loc, data);
3419 break;
3420 }
3421 glUnmapBuffer(GL_ARRAY_BUFFER);
3422 disable_bitmask |= (1 << loc);
3423 } else {
3424 enable_bitmask |= (1 << loc);
3425 if (util_format_is_pure_integer(ve->base.src_format)) {
3426 glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
3427 } else {
3428 glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
3429 }
3430 glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
3431 }
3432 }
3433 if (ctx->sub->enabled_attribs_bitmask != enable_bitmask) {
3434 uint32_t mask = ctx->sub->enabled_attribs_bitmask & disable_bitmask;
3435
3436 while (mask) {
3437 i = u_bit_scan(&mask);
3438 glDisableVertexAttribArray(i);
3439 }
3440 ctx->sub->enabled_attribs_bitmask &= ~disable_bitmask;
3441
3442 mask = ctx->sub->enabled_attribs_bitmask ^ enable_bitmask;
3443 while (mask) {
3444 i = u_bit_scan(&mask);
3445 glEnableVertexAttribArray(i);
3446 }
3447
3448 ctx->sub->enabled_attribs_bitmask = enable_bitmask;
3449 }
3450 }
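
/* The tail of the function above is a bitmask-diffing technique:
 * enabled_attribs_bitmask caches which generic attrib arrays are
 * currently on, and only slots whose cached state differs from the
 * state wanted by this draw get a GL call. Minimal sketch of the same
 * idea (names illustrative):
 *
 *    uint32_t to_disable = cur_mask & disable_bitmask;
 *    while (to_disable)
 *       glDisableVertexAttribArray(u_bit_scan(&to_disable));
 *    cur_mask &= ~disable_bitmask;
 *
 *    uint32_t to_enable = cur_mask ^ enable_bitmask;
 *    while (to_enable)
 *       glEnableVertexAttribArray(u_bit_scan(&to_enable));
 *    cur_mask = enable_bitmask;
 */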
3451
3452 static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
3453 struct vrend_vertex_element_array *va)
3454 {
3455 int i;
3456
3457 glBindVertexArray(va->id);
3458
3459 if (ctx->sub->vbo_dirty) {
3460 for (i = 0; i < ctx->sub->num_vbos; i++) {
3461 struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
3462 if (!res)
3463 glBindVertexBuffer(i, 0, 0, 0);
3464 else
3465 glBindVertexBuffer(i,
3466 res->id,
3467 ctx->sub->vbo[i].buffer_offset,
3468 ctx->sub->vbo[i].stride);
3469 }
3470 for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
3471 glBindVertexBuffer(i, 0, 0, 0);
3472 }
3473 ctx->sub->vbo_dirty = false;
3474 }
3475 }
3476
3477 static void vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
3478 int shader_type,
3479 int *sampler_id)
3480 {
3481 int index = 0;
3482 for (int i = 0; i < ctx->sub->views[shader_type].num_views; i++) {
3483 struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
3484
3485 if (!tview)
3486 continue;
3487
3488 if (!(ctx->sub->prog->samplers_used_mask[shader_type] & (1 << i)))
3489 continue;
3490
3491 if (ctx->sub->prog->samp_locs[shader_type])
3492 glUniform1i(ctx->sub->prog->samp_locs[shader_type][index], *sampler_id);
3493
3494 if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
3495 glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
3496 (tview->gl_swizzle_r == GL_ZERO || tview->gl_swizzle_r == GL_ONE) ? 0.0 : 1.0,
3497 (tview->gl_swizzle_g == GL_ZERO || tview->gl_swizzle_g == GL_ONE) ? 0.0 : 1.0,
3498 (tview->gl_swizzle_b == GL_ZERO || tview->gl_swizzle_b == GL_ONE) ? 0.0 : 1.0,
3499 (tview->gl_swizzle_a == GL_ZERO || tview->gl_swizzle_a == GL_ONE) ? 0.0 : 1.0);
3500 glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
3501 tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
3502 tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
3503 tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
3504 tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
3505 }
3506
3507 glActiveTexture(GL_TEXTURE0 + *sampler_id);
3508 if (tview->texture) {
3509 GLuint id;
3510 struct vrend_resource *texture = tview->texture;
3511 GLenum target = tview->target;
3512
3513 if (texture->is_buffer) {
3514 id = texture->tbo_tex_id;
3515 target = GL_TEXTURE_BUFFER;
3516 } else
3517 id = tview->id;
3518
3519 glBindTexture(target, id);
3520 if (ctx->sub->views[shader_type].old_ids[i] != id || ctx->sub->sampler_state_dirty) {
3521 vrend_apply_sampler_state(ctx, texture, shader_type, i, *sampler_id, tview->srgb_decode);
3522 ctx->sub->views[shader_type].old_ids[i] = id;
3523 }
3524 if (ctx->sub->rs_state.point_quad_rasterization) {
3525 if (vrend_state.use_core_profile == false) {
3526 if (ctx->sub->rs_state.sprite_coord_enable & (1 << i))
3527 glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
3528 else
3529 glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_FALSE);
3530 }
3531 }
3532 (*sampler_id)++;
3533 }
3534 index++;
3535 }
3536 }
3537
3538 static void vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
3539 int shader_type, int *ubo_id)
3540 {
3541 uint32_t mask;
3542 int shader_ubo_idx;
3543 struct pipe_constant_buffer *cb;
3544 struct vrend_resource *res;
3545 struct vrend_shader_info* sinfo;
3546
3547 if (!has_feature(feat_ubo))
3548 return;
3549
3550 if (!ctx->sub->const_bufs_used_mask[shader_type])
3551 return;
3552
3553 if (!ctx->sub->prog->ubo_locs[shader_type])
3554 return;
3555
3556 sinfo = &ctx->sub->prog->ss[shader_type]->sel->sinfo;
3557
3558 mask = ctx->sub->const_bufs_used_mask[shader_type];
3559 while (mask) {
3560 /* The const_bufs_used_mask stores the gallium uniform buffer indices */
3561 int i = u_bit_scan(&mask);
3562
3563 /* The cbs array is indexed using the gallium uniform buffer index */
3564 cb = &ctx->sub->cbs[shader_type][i];
3565 res = (struct vrend_resource *)cb->buffer;
3566
3567 /* Find the index of the uniform buffer in the array of shader ubo data */
3568 for (shader_ubo_idx = 0; shader_ubo_idx < sinfo->num_ubos; shader_ubo_idx++) {
3569 if (sinfo->ubo_idx[shader_ubo_idx] == i)
3570 break;
3571 }
3572 if (shader_ubo_idx == sinfo->num_ubos)
3573 continue;
3574
3575 glBindBufferRange(GL_UNIFORM_BUFFER, *ubo_id, res->id,
3576 cb->buffer_offset, cb->buffer_size);
3577 /* The ubo_locs array is indexed using the shader ubo index */
3578 glUniformBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ubo_locs[shader_type][shader_ubo_idx], *ubo_id);
3579 (*ubo_id)++;
3580 }
3581 }
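
/* Worked example of the remapping above (illustrative values): if a
 * shader declares UBOs at gallium indices {1, 3}, then sinfo->ubo_idx
 * holds {1, 3}, and a buffer bound at gallium slot 3 resolves to shader
 * ubo index 1, i.e. the block location ubo_locs[shader_type][1]. The
 * GL binding point itself (*ubo_id) is unrelated to either index; it is
 * simply allocated linearly across all stages of the program.
 */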
3582
3583 static void vrend_draw_bind_const_shader(struct vrend_context *ctx,
3584 int shader_type, bool new_program)
3585 {
3586 if (ctx->sub->consts[shader_type].consts &&
3587 ctx->sub->prog->const_locs[shader_type] &&
3588 (ctx->sub->const_dirty[shader_type] || new_program)) {
3589 for (int i = 0; i < ctx->sub->shaders[shader_type]->sinfo.num_consts; i++) {
3590 if (ctx->sub->prog->const_locs[shader_type][i] != -1)
3591 glUniform4uiv(ctx->sub->prog->const_locs[shader_type][i], 1, &ctx->sub->consts[shader_type].consts[i * 4]);
3592 }
3593 ctx->sub->const_dirty[shader_type] = false;
3594 }
3595 }
3596
3597 static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_type)
3598 {
3599 uint32_t mask;
3600 struct vrend_ssbo *ssbo;
3601 struct vrend_resource *res;
3602 int i;
3603
3604 if (!has_feature(feat_ssbo))
3605 return;
3606
3607 if (!ctx->sub->prog->ssbo_locs[shader_type])
3608 return;
3609
3610 if (!ctx->sub->ssbo_used_mask[shader_type])
3611 return;
3612
3613 mask = ctx->sub->ssbo_used_mask[shader_type];
3614 while (mask) {
3615 i = u_bit_scan(&mask);
3616
3617 ssbo = &ctx->sub->ssbo[shader_type][i];
3618 res = (struct vrend_resource *)ssbo->res;
3619 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id,
3620 ssbo->buffer_offset, ssbo->buffer_size);
3621 if (ctx->sub->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
3622 if (!vrend_state.use_gles)
3623 glShaderStorageBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ssbo_locs[shader_type][i], i);
3624 else
3625 debug_printf("glShaderStorageBlockBinding not supported on gles \n");
3626 }
3627 }
3628 }
3629
3630 static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_type)
3631 {
3632 GLenum access;
3633 GLboolean layered;
3634 struct vrend_image_view *iview;
3635 uint32_t mask, tex_id, level, first_layer;
3636
3637 if (!has_feature(feat_images))
3638 return;
3639
3640 if (!ctx->sub->images_used_mask[shader_type])
3641 return;
3642
3643 if (!ctx->sub->prog->img_locs[shader_type])
3644 return;
3645
3646 mask = ctx->sub->images_used_mask[shader_type];
3647 while (mask) {
3648 unsigned i = u_bit_scan(&mask);
3649
3650 if (!(ctx->sub->prog->images_used_mask[shader_type] & (1 << i)))
3651 continue;
3652 iview = &ctx->sub->image_views[shader_type][i];
3653 tex_id = iview->texture->id;
3654 if (iview->texture->is_buffer) {
3655 if (!iview->texture->tbo_tex_id)
3656 glGenTextures(1, &iview->texture->tbo_tex_id);
3657
3658 /* glTexBuffer doesn't accept GL_RGBA8_SNORM, find an appropriate replacement. */
3659 uint32_t format = (iview->format == GL_RGBA8_SNORM) ? GL_RGBA8UI : iview->format;
3660
3661 glBindBufferARB(GL_TEXTURE_BUFFER, iview->texture->id);
3662 glBindTexture(GL_TEXTURE_BUFFER, iview->texture->tbo_tex_id);
3663 glTexBuffer(GL_TEXTURE_BUFFER, format, iview->texture->id);
3664 tex_id = iview->texture->tbo_tex_id;
3665 level = first_layer = 0;
3666 layered = GL_TRUE;
3667 } else {
3668 level = iview->u.tex.level;
3669 first_layer = iview->u.tex.first_layer;
3670 layered = !((iview->texture->base.array_size > 1 ||
3671 iview->texture->base.depth0 > 1) && (iview->u.tex.first_layer == iview->u.tex.last_layer));
3672 }
3673
3674 if (!vrend_state.use_gles)
3675 glUniform1i(ctx->sub->prog->img_locs[shader_type][i], i);
3676
3677 switch (iview->access) {
3678 case PIPE_IMAGE_ACCESS_READ:
3679 access = GL_READ_ONLY;
3680 break;
3681 case PIPE_IMAGE_ACCESS_WRITE:
3682 access = GL_WRITE_ONLY;
3683 break;
3684 case PIPE_IMAGE_ACCESS_READ_WRITE:
3685 access = GL_READ_WRITE;
3686 break;
3687 default:
3688 fprintf(stderr, "Invalid access specified\n");
3689 return;
3690 }
3691
3692 glBindImageTexture(i, tex_id, level, layered, first_layer, access, iview->format);
3693 }
3694 }
3695
3696 static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
3697 {
3698 int ubo_id = 0, sampler_id = 0;
3699 for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
3700 vrend_draw_bind_ubo_shader(ctx, shader_type, &ubo_id);
3701 vrend_draw_bind_const_shader(ctx, shader_type, new_program);
3702 vrend_draw_bind_samplers_shader(ctx, shader_type, &sampler_id);
3703 vrend_draw_bind_images_shader(ctx, shader_type);
3704 vrend_draw_bind_ssbo_shader(ctx, shader_type);
3705 }
3706
3707 if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
3708 glActiveTexture(GL_TEXTURE0 + sampler_id);
3709 glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
3710 glUniform1i(ctx->sub->prog->fs_stipple_loc, sampler_id);
3711 }
3712 ctx->sub->sampler_state_dirty = false;
3713 }
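
/* Note on the shared counters above: ubo_id and sampler_id are single
 * allocators for GL binding points, walked across every active shader
 * stage. A fragment sampler therefore never collides with a vertex
 * sampler even though both start at slot 0 within their own stage, and
 * the polygon-stipple texture simply takes the next free texture unit.
 */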
3714
3715 int vrend_draw_vbo(struct vrend_context *ctx,
3716 const struct pipe_draw_info *info,
3717 uint32_t cso, uint32_t indirect_handle,
3718 uint32_t indirect_draw_count_handle)
3719 {
3720 int i;
3721 bool new_program = false;
3722 struct vrend_resource *indirect_res = NULL;
3723
3724 if (ctx->in_error)
3725 return 0;
3726
3727 if (info->instance_count && !has_feature(feat_draw_instance))
3728 return EINVAL;
3729
3730 if (info->start_instance && !has_feature(feat_base_instance))
3731 return EINVAL;
3732
3733 if (indirect_handle) {
3734 if (!has_feature(feat_indirect_draw))
3735 return EINVAL;
3736 indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
3737 if (!indirect_res) {
3738 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_handle);
3739 return 0;
3740 }
3741 }
3742
3743 /* this must be zero until we support the feature */
3744 if (indirect_draw_count_handle) {
3745 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_draw_count_handle);
3746 return 0;
3747 }
3748
3749 if (ctx->ctx_switch_pending)
3750 vrend_finish_context_switch(ctx);
3751
3752 vrend_update_frontface_state(ctx);
3753 if (ctx->sub->stencil_state_dirty)
3754 vrend_update_stencil_state(ctx);
3755 if (ctx->sub->scissor_state_dirty)
3756 vrend_update_scissor_state(ctx);
3757
3758 if (ctx->sub->viewport_state_dirty)
3759 vrend_update_viewport_state(ctx);
3760
3761 vrend_patch_blend_state(ctx);
3762
3763 if (ctx->sub->shader_dirty) {
3764 struct vrend_linked_shader_program *prog;
3765 bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
3766 bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
3767 bool same_prog;
3768 if (!ctx->sub->shaders[PIPE_SHADER_VERTEX] || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
3769 fprintf(stderr,"dropping rendering due to missing shaders: %s\n", ctx->debug_name);
3770 return 0;
3771 }
3772
3773 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
3774 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
3775 if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
3776 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
3777 if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
3778 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
3779 if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
3780 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
3781
3782 if (!ctx->sub->shaders[PIPE_SHADER_VERTEX]->current ||
3783 !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current ||
3784 (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && !ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current) ||
3785 (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && !ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current) ||
3786 (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && !ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current)) {
3787 fprintf(stderr, "failure to compile shader variants: %s\n", ctx->debug_name);
3788 return 0;
3789 }
3790 same_prog = true;
3791 if (ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_VERTEX])
3792 same_prog = false;
3793 if (ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT])
3794 same_prog = false;
3795 if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY])
3796 same_prog = false;
3797 if (ctx->sub->prog && ctx->sub->prog->dual_src_linked != dual_src)
3798 same_prog = false;
3799 if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL])
3800 same_prog = false;
3801 if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL])
3802 same_prog = false;
3803
3804 if (!same_prog) {
3805 prog = lookup_shader_program(ctx,
3806 ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id,
3807 ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id,
3808 ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id : 0,
3809 ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0,
3810 ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0,
3811 dual_src);
3812 if (!prog) {
3813 prog = add_shader_program(ctx,
3814 ctx->sub->shaders[PIPE_SHADER_VERTEX]->current,
3815 ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current,
3816 ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
3817 ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
3818 ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
3819 if (!prog)
3820 return 0;
3821 }
3822
3823 ctx->sub->last_shader_idx = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
3824 } else
3825 prog = ctx->sub->prog;
3826 if (ctx->sub->prog != prog) {
3827 new_program = true;
3828 ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id;
3829 ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT] = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id;
3830 if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
3831 ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY] = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id;
3832 if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
3833 ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL] = ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id;
3834 if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
3835 ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL] = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id;
3836 ctx->sub->prog = prog;
3837 }
3838 }
3839 if (!ctx->sub->prog) {
3840 fprintf(stderr,"dropping rendering due to missing shaders: %s\n", ctx->debug_name);
3841 return 0;
3842 }
3843 glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
3844
3845 vrend_use_program(ctx, ctx->sub->prog->id);
3846
3847 vrend_draw_bind_objects(ctx, new_program);
3848
3849 if (!ctx->sub->ve) {
3850 fprintf(stderr,"illegal VE setup - skipping renderering\n");
3851 return 0;
3852 }
3853 glUniform1f(ctx->sub->prog->vs_ws_adjust_loc, ctx->sub->viewport_is_negative ? -1.0 : 1.0);
3854
3855 if (ctx->sub->rs_state.clip_plane_enable) {
3856 for (i = 0 ; i < 8; i++) {
3857 glUniform4fv(ctx->sub->prog->clip_locs[i], 1, (const GLfloat *)&ctx->sub->ucp_state.ucp[i]);
3858 }
3859 }
3860
3861 if (has_feature(feat_gles31_vertex_attrib_binding))
3862 vrend_draw_bind_vertex_binding(ctx, ctx->sub->ve);
3863 else
3864 vrend_draw_bind_vertex_legacy(ctx, ctx->sub->ve);
3865
3866 for (i = 0 ; i < ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs; i++) {
3867 struct vrend_vertex_element_array *va = ctx->sub->ve;
3868 struct vrend_vertex_element *ve = &va->elements[i];
3869 int vbo_index = ve->base.vertex_buffer_index;
3870 if (!ctx->sub->vbo[vbo_index].buffer) {
3871 fprintf(stderr, "VBO missing vertex buffer\n");
3872 return 0;
3873 }
3874 }
3875
3876 if (info->indexed) {
3877 struct vrend_resource *res = (struct vrend_resource *)ctx->sub->ib.buffer;
3878 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, res->id);
3879 } else
3880 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
3881
3882 if (ctx->sub->current_so) {
3883 if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
3884 if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
3885 glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
3886 else if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
3887 glBeginTransformFeedback(get_tess_xfb_mode(ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
3888 ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
3889 else
3890 glBeginTransformFeedback(get_xfb_mode(info->mode));
3891 ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
3892 } else if (ctx->sub->current_so->xfb_state == XFB_STATE_PAUSED) {
3893 glResumeTransformFeedback();
3894 ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
3895 }
3896 }
3897
3898 if (info->primitive_restart) {
3899 if (vrend_state.use_gles) {
3900 glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
3901 } else if (has_feature(feat_nv_prim_restart)) {
3902 glEnableClientState(GL_PRIMITIVE_RESTART_NV);
3903 glPrimitiveRestartIndexNV(info->restart_index);
3904 } else if (has_feature(feat_gl_prim_restart)) {
3905 glEnable(GL_PRIMITIVE_RESTART);
3906 glPrimitiveRestartIndex(info->restart_index);
3907 }
3908 }
3909
3910 if (has_feature(feat_indirect_draw)) {
3911 if (indirect_res)
3912 glBindBuffer(GL_DRAW_INDIRECT_BUFFER, indirect_res->id);
3913 else
3914 glBindBuffer(GL_DRAW_INDIRECT_BUFFER, 0);
3915 }
3916
3917 if (info->vertices_per_patch && has_feature(feat_tessellation))
3918 glPatchParameteri(GL_PATCH_VERTICES, info->vertices_per_patch);
3919
3920 /* set the vertex state up now on a delay */
3921 if (!info->indexed) {
3922 GLenum mode = info->mode;
3923 int count = cso ? cso : info->count;
3924 int start = cso ? 0 : info->start;
3925
3926 if (indirect_handle)
3927 glDrawArraysIndirect(mode, (GLvoid const *)(unsigned long)info->indirect.offset);
3928 else if (info->instance_count <= 1)
3929 glDrawArrays(mode, start, count);
3930 else if (info->start_instance)
3931 glDrawArraysInstancedBaseInstance(mode, start, count, info->instance_count, info->start_instance);
3932 else
3933 glDrawArraysInstancedARB(mode, start, count, info->instance_count);
3934 } else {
3935 GLenum elsz;
3936 GLenum mode = info->mode;
3937 switch (ctx->sub->ib.index_size) {
3938 case 1:
3939 elsz = GL_UNSIGNED_BYTE;
3940 break;
3941 case 2:
3942 elsz = GL_UNSIGNED_SHORT;
3943 break;
3944 case 4:
3945 default:
3946 elsz = GL_UNSIGNED_INT;
3947 break;
3948 }
3949
3950 if (indirect_handle)
3951 glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset);
3952 else if (info->index_bias) {
3953 if (info->instance_count > 1)
3954 glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count, info->index_bias);
3955 else if (info->min_index != 0 || info->max_index != (unsigned)-1)
3956 glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
3957 else
3958 glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
3959 } else if (info->instance_count > 1) {
3960 glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count);
3961 } else if (info->min_index != 0 || info->max_index != (unsigned)-1)
3962 glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
3963 else
3964 glDrawElements(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
3965 }
3966
3967 if (info->primitive_restart) {
3968 if (vrend_state.use_gles) {
3969 glDisable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
3970 } else if (has_feature(feat_nv_prim_restart)) {
3971 glDisableClientState(GL_PRIMITIVE_RESTART_NV);
3972 } else if (has_feature(feat_gl_prim_restart)) {
3973 glDisable(GL_PRIMITIVE_RESTART);
3974 }
3975 }
3976
3977 if (ctx->sub->current_so && has_feature(feat_transform_feedback2)) {
3978 if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED) {
3979 glPauseTransformFeedback();
3980 ctx->sub->current_so->xfb_state = XFB_STATE_PAUSED;
3981 }
3982 }
3983 return 0;
3984 }
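
/* Summary of the draw-call selection above (sketch):
 *
 *   non-indexed: indirect         -> glDrawArraysIndirect
 *                1 instance       -> glDrawArrays
 *                + start_instance -> glDrawArraysInstancedBaseInstance
 *                instanced        -> glDrawArraysInstancedARB
 *
 *   indexed:     indirect         -> glDrawElementsIndirect
 *                + index_bias     -> glDrawElements*BaseVertex variants
 *                instanced        -> glDrawElementsInstancedARB
 *                + min/max range  -> glDrawRangeElements
 *                otherwise        -> glDrawElements
 *
 * ctx->sub->ib.index_size (1/2/4) picks GL_UNSIGNED_BYTE/SHORT/INT.
 */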
3985
3986 void vrend_launch_grid(struct vrend_context *ctx,
3987 UNUSED uint32_t *block,
3988 uint32_t *grid,
3989 uint32_t indirect_handle,
3990 uint32_t indirect_offset)
3991 {
3992 bool new_program = false;
3993 struct vrend_resource *indirect_res = NULL;
3994
3995 if (!has_feature(feat_compute_shader))
3996 return;
3997
3998 if (ctx->sub->cs_shader_dirty) {
3999 struct vrend_linked_shader_program *prog;
4000 bool same_prog, cs_dirty;
4001 if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]) {
4002 fprintf(stderr,"dropping rendering due to missing shaders: %s\n", ctx->debug_name);
4003 return;
4004 }
4005
4006 vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
4007 if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current) {
4008 fprintf(stderr, "failure to compile shader variants: %s\n", ctx->debug_name);
4009 return;
4010 }
4011 same_prog = true;
4012 if (ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_COMPUTE])
4013 same_prog = false;
4014 if (!same_prog) {
4015 prog = lookup_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id);
4016 if (!prog) {
4017 prog = add_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current);
4018 if (!prog)
4019 return;
4020 }
4021 } else
4022 prog = ctx->sub->prog;
4023
4024 if (ctx->sub->prog != prog) {
4025 new_program = true;
4026 ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = -1;
4027 ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id;
4028 ctx->sub->prog = prog;
4029 }
4030 ctx->sub->shader_dirty = true;
4031 }
4032 vrend_use_program(ctx, ctx->sub->prog->id);
4033
4034 int sampler_id = 0, ubo_id = 0;
4035 vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, &ubo_id);
4036 vrend_draw_bind_const_shader(ctx, PIPE_SHADER_COMPUTE, new_program);
4037 vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, &sampler_id);
4038 vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
4039 vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
4040
4041 if (indirect_handle) {
4042 indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
4043 if (!indirect_res) {
4044 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_handle);
4045 return;
4046 }
4047 }
4048
4049 if (indirect_res)
4050 glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirect_res->id);
4051 else
4052 glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, 0);
4053
4054 if (indirect_res) {
4055 glDispatchComputeIndirect(indirect_offset);
4056 } else {
4057 glDispatchCompute(grid[0], grid[1], grid[2]);
4058 }
4059 }
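
/* Layout of the buffer consumed by glDispatchComputeIndirect() above,
 * as defined by the GL spec (sketch):
 *
 *    typedef struct {
 *       GLuint num_groups_x;
 *       GLuint num_groups_y;
 *       GLuint num_groups_z;
 *    } DispatchIndirectCommand;
 *
 * indirect_offset must point at such a tightly packed triple inside
 * indirect_res; the non-indirect path passes grid[0..2] directly.
 */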
4060
4061 static GLenum translate_blend_func(uint32_t pipe_blend)
4062 {
4063 switch(pipe_blend){
4064 case PIPE_BLEND_ADD: return GL_FUNC_ADD;
4065 case PIPE_BLEND_SUBTRACT: return GL_FUNC_SUBTRACT;
4066 case PIPE_BLEND_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT;
4067 case PIPE_BLEND_MIN: return GL_MIN;
4068 case PIPE_BLEND_MAX: return GL_MAX;
4069 default:
4070 assert("invalid blend token()" == NULL);
4071 return 0;
4072 }
4073 }
4074
4075 static GLenum translate_blend_factor(uint32_t pipe_factor)
4076 {
4077 switch (pipe_factor) {
4078 case PIPE_BLENDFACTOR_ONE: return GL_ONE;
4079 case PIPE_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR;
4080 case PIPE_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA;
4081
4082 case PIPE_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR;
4083 case PIPE_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA;
4084
4085 case PIPE_BLENDFACTOR_CONST_COLOR: return GL_CONSTANT_COLOR;
4086 case PIPE_BLENDFACTOR_CONST_ALPHA: return GL_CONSTANT_ALPHA;
4087
4088 case PIPE_BLENDFACTOR_SRC1_COLOR: return GL_SRC1_COLOR;
4089 case PIPE_BLENDFACTOR_SRC1_ALPHA: return GL_SRC1_ALPHA;
4090 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return GL_SRC_ALPHA_SATURATE;
4091 case PIPE_BLENDFACTOR_ZERO: return GL_ZERO;
4092
4093
4094 case PIPE_BLENDFACTOR_INV_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR;
4095 case PIPE_BLENDFACTOR_INV_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA;
4096
4097 case PIPE_BLENDFACTOR_INV_DST_COLOR: return GL_ONE_MINUS_DST_COLOR;
4098 case PIPE_BLENDFACTOR_INV_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA;
4099
4100 case PIPE_BLENDFACTOR_INV_CONST_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR;
4101 case PIPE_BLENDFACTOR_INV_CONST_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA;
4102
4103 case PIPE_BLENDFACTOR_INV_SRC1_COLOR: return GL_ONE_MINUS_SRC1_COLOR;
4104 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: return GL_ONE_MINUS_SRC1_ALPHA;
4105
4106 default:
4107 assert("invalid blend token()" == NULL);
4108 return 0;
4109 }
4110 }
4111
4112 static GLenum
4113 translate_logicop(GLuint pipe_logicop)
4114 {
4115 switch (pipe_logicop) {
4116 #define CASE(x) case PIPE_LOGICOP_##x: return GL_##x
4117 CASE(CLEAR);
4118 CASE(NOR);
4119 CASE(AND_INVERTED);
4120 CASE(COPY_INVERTED);
4121 CASE(AND_REVERSE);
4122 CASE(INVERT);
4123 CASE(XOR);
4124 CASE(NAND);
4125 CASE(AND);
4126 CASE(EQUIV);
4127 CASE(NOOP);
4128 CASE(OR_INVERTED);
4129 CASE(COPY);
4130 CASE(OR_REVERSE);
4131 CASE(OR);
4132 CASE(SET);
4133 default:
4134 assert("invalid logicop token()" == NULL);
4135 return 0;
4136 }
4137 #undef CASE
4138 }
4139
4140 static GLenum
4141 translate_stencil_op(GLuint op)
4142 {
4143 switch (op) {
4144 #define CASE(x) case PIPE_STENCIL_OP_##x: return GL_##x
4145 CASE(KEEP);
4146 CASE(ZERO);
4147 CASE(REPLACE);
4148 CASE(INCR);
4149 CASE(DECR);
4150 CASE(INCR_WRAP);
4151 CASE(DECR_WRAP);
4152 CASE(INVERT);
4153 default:
4154 assert("invalid stencilop token()" == NULL);
4155 return 0;
4156 }
4157 #undef CASE
4158 }
4159
4160 static inline bool is_dst_blend(int blend_factor)
4161 {
4162 return (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA ||
4163 blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA);
4164 }
4165
4166 static inline int conv_a8_blend(int blend_factor)
4167 {
4168 if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA)
4169 return PIPE_BLENDFACTOR_DST_COLOR;
4170 if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA)
4171 return PIPE_BLENDFACTOR_INV_DST_COLOR;
4172 return blend_factor;
4173 }
4174
4175 static inline int conv_dst_blend(int blend_factor)
4176 {
4177 if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA)
4178 return PIPE_BLENDFACTOR_ONE;
4179 if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA)
4180 return PIPE_BLENDFACTOR_ZERO;
4181 return blend_factor;
4182 }
4183
4184 static inline bool is_const_blend(int blend_factor)
4185 {
4186 return (blend_factor == PIPE_BLENDFACTOR_CONST_COLOR ||
4187 blend_factor == PIPE_BLENDFACTOR_CONST_ALPHA ||
4188 blend_factor == PIPE_BLENDFACTOR_INV_CONST_COLOR ||
4189 blend_factor == PIPE_BLENDFACTOR_INV_CONST_ALPHA);
4190 }
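
/* Worked example for the helpers above: with an A8 surface emulated as
 * GL_R8, the alpha value lives in the red channel. A guest alpha blend
 * of
 *
 *    src = PIPE_BLENDFACTOR_DST_ALPHA, dst = PIPE_BLENDFACTOR_INV_DST_ALPHA
 *
 * is therefore rewritten by conv_a8_blend() into the color factors
 *
 *    src = PIPE_BLENDFACTOR_DST_COLOR, dst = PIPE_BLENDFACTOR_INV_DST_COLOR
 *
 * while the real alpha factors are zeroed and the colormask is reduced
 * to PIPE_MASK_R (see vrend_patch_blend_state() below).
 */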
4191
4192 static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
4193 {
4194 if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
4195 ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
4196 if (vrend_state.use_gles) {
4197 if (state->logicop_enable) {
4198 report_gles_warn(ctx, GLES_WARN_LOGIC_OP, 0);
4199 }
4200 } else if (state->logicop_enable) {
4201 glEnable(GL_COLOR_LOGIC_OP);
4202 glLogicOp(translate_logicop(state->logicop_func));
4203 } else {
4204 glDisable(GL_COLOR_LOGIC_OP);
4205 }
4206 }
4207
4208 if (state->independent_blend_enable &&
4209 has_feature(feat_indep_blend) &&
4210 has_feature(feat_indep_blend_func)) {
4211 /* ARB_draw_buffers_blend is required for this */
4212 int i;
4213
4214 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
4215
4216 if (state->rt[i].blend_enable) {
4217 bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, i);
4218 if (dual_src && !has_feature(feat_dual_src_blend)) {
4219 fprintf(stderr, "dual src blend requested but not supported for rt %d\n", i);
4220 continue;
4221 }
4222
4223 glBlendFuncSeparateiARB(i, translate_blend_factor(state->rt[i].rgb_src_factor),
4224 translate_blend_factor(state->rt[i].rgb_dst_factor),
4225 translate_blend_factor(state->rt[i].alpha_src_factor),
4226 translate_blend_factor(state->rt[i].alpha_dst_factor));
4227 glBlendEquationSeparateiARB(i, translate_blend_func(state->rt[i].rgb_func),
4228 translate_blend_func(state->rt[i].alpha_func));
4229 glEnableIndexedEXT(GL_BLEND, i);
4230 } else
4231 glDisableIndexedEXT(GL_BLEND, i);
4232
4233 if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
4234 ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
4235 glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
4236 state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
4237 state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
4238 state->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
4239 }
4240 }
4241 } else {
4242 if (state->rt[0].blend_enable) {
4243 bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
4244 if (dual_src && !has_feature(feat_dual_src_blend)) {
4245 fprintf(stderr, "dual src blend requested but not supported for rt 0\n");
4246 }
4247 glBlendFuncSeparate(translate_blend_factor(state->rt[0].rgb_src_factor),
4248 translate_blend_factor(state->rt[0].rgb_dst_factor),
4249 translate_blend_factor(state->rt[0].alpha_src_factor),
4250 translate_blend_factor(state->rt[0].alpha_dst_factor));
4251 glBlendEquationSeparate(translate_blend_func(state->rt[0].rgb_func),
4252 translate_blend_func(state->rt[0].alpha_func));
4253 vrend_blend_enable(ctx, true);
4254 }
4255 else
4256 vrend_blend_enable(ctx, false);
4257
4258 if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask) {
4259 int i;
4260 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
4261 ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
4262 glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
4263 state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
4264 state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
4265 state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
4266 }
4267 }
4268 ctx->sub->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
4269
4270 if (has_feature(feat_multisample)) {
4271 if (state->alpha_to_coverage)
4272 glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
4273 else
4274 glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
4275
4276 if (!vrend_state.use_gles) {
4277 if (state->alpha_to_one)
4278 glEnable(GL_SAMPLE_ALPHA_TO_ONE);
4279 else
4280 glDisable(GL_SAMPLE_ALPHA_TO_ONE);
4281 }
4282 }
4283
4284 if (state->dither)
4285 glEnable(GL_DITHER);
4286 else
4287 glDisable(GL_DITHER);
4288 }
4289
4290 /* there are a few reasons we might need to patch the blend state.
4291 a) patching blend factors for dst with no alpha
4292 b) patching colormask/blendcolor/blendfactors for A8/A16 format
4293 emulation using GL_R8/GL_R16.
4294 */
4295 static void vrend_patch_blend_state(struct vrend_context *ctx)
4296 {
4297 struct pipe_blend_state new_state = ctx->sub->blend_state;
4298 struct pipe_blend_state *state = &ctx->sub->blend_state;
4299 bool swizzle_blend_color = false;
4300 struct pipe_blend_color blend_color = ctx->sub->blend_color;
4301 int i;
4302
4303 if (ctx->sub->nr_cbufs == 0)
4304 return;
4305
4306 for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
4307 if (i < ctx->sub->nr_cbufs && ctx->sub->surf[i]) {
4308 if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
4309 if (state->rt[i].blend_enable) {
4310 new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
4311 new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
4312 new_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
4313 new_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
4314 }
4315 new_state.rt[i].colormask = 0;
4316 if (state->rt[i].colormask & PIPE_MASK_A)
4317 new_state.rt[i].colormask |= PIPE_MASK_R;
4318 if (is_const_blend(new_state.rt[i].rgb_src_factor) ||
4319 is_const_blend(new_state.rt[i].rgb_dst_factor)) {
4320 swizzle_blend_color = true;
4321 }
4322 } else if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
4323 if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
4324 is_dst_blend(state->rt[i].rgb_dst_factor) ||
4325 is_dst_blend(state->rt[i].alpha_src_factor) ||
4326 is_dst_blend(state->rt[i].alpha_dst_factor)))
4327 continue;
4328 new_state.rt[i].rgb_src_factor = conv_dst_blend(state->rt[i].rgb_src_factor);
4329 new_state.rt[i].rgb_dst_factor = conv_dst_blend(state->rt[i].rgb_dst_factor);
4330 new_state.rt[i].alpha_src_factor = conv_dst_blend(state->rt[i].alpha_src_factor);
4331 new_state.rt[i].alpha_dst_factor = conv_dst_blend(state->rt[i].alpha_dst_factor);
4332 }
4333 }
4334 }
4335
4336 vrend_hw_emit_blend(ctx, &new_state);
4337
4338 if (swizzle_blend_color) {
4339 blend_color.color[0] = blend_color.color[3];
4340 blend_color.color[1] = 0.0f;
4341 blend_color.color[2] = 0.0f;
4342 blend_color.color[3] = 0.0f;
4343 }
4344
4345 glBlendColor(blend_color.color[0],
4346 blend_color.color[1],
4347 blend_color.color[2],
4348 blend_color.color[3]);
4349 }
4350
4351 void vrend_object_bind_blend(struct vrend_context *ctx,
4352 uint32_t handle)
4353 {
4354 struct pipe_blend_state *state;
4355
4356 if (handle == 0) {
4357 memset(&ctx->sub->blend_state, 0, sizeof(ctx->sub->blend_state));
4358 vrend_blend_enable(ctx, false);
4359 return;
4360 }
4361 state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_BLEND);
4362 if (!state) {
4363 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
4364 return;
4365 }
4366
4367 ctx->sub->shader_dirty = true;
4368 ctx->sub->blend_state = *state;
4369
4370 vrend_hw_emit_blend(ctx, &ctx->sub->blend_state);
4371 }
4372
4373 static void vrend_hw_emit_dsa(struct vrend_context *ctx)
4374 {
4375 struct pipe_depth_stencil_alpha_state *state = &ctx->sub->dsa_state;
4376
4377 if (state->depth.enabled) {
4378 vrend_depth_test_enable(ctx, true);
4379 glDepthFunc(GL_NEVER + state->depth.func);
4380 if (state->depth.writemask)
4381 glDepthMask(GL_TRUE);
4382 else
4383 glDepthMask(GL_FALSE);
4384 } else
4385 vrend_depth_test_enable(ctx, false);
4386
4387 if (state->alpha.enabled) {
4388 vrend_alpha_test_enable(ctx, true);
4389 if (!vrend_state.use_core_profile)
4390 glAlphaFunc(GL_NEVER + state->alpha.func, state->alpha.ref_value);
4391 } else
4392 vrend_alpha_test_enable(ctx, false);
4393
4394
4395 }
4396 void vrend_object_bind_dsa(struct vrend_context *ctx,
4397 uint32_t handle)
4398 {
4399 struct pipe_depth_stencil_alpha_state *state;
4400
4401 if (handle == 0) {
4402 memset(&ctx->sub->dsa_state, 0, sizeof(ctx->sub->dsa_state));
4403 ctx->sub->dsa = NULL;
4404 ctx->sub->stencil_state_dirty = true;
4405 ctx->sub->shader_dirty = true;
4406 vrend_hw_emit_dsa(ctx);
4407 return;
4408 }
4409
4410 state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_DSA);
4411 if (!state) {
4412 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
4413 return;
4414 }
4415
4416 if (ctx->sub->dsa != state) {
4417 ctx->sub->stencil_state_dirty = true;
4418 ctx->sub->shader_dirty = true;
4419 }
4420 ctx->sub->dsa_state = *state;
4421 ctx->sub->dsa = state;
4422
4423 vrend_hw_emit_dsa(ctx);
4424 }
4425
4426 static void vrend_update_frontface_state(struct vrend_context *ctx)
4427 {
4428 struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
4429 int front_ccw = state->front_ccw;
4430
4431 front_ccw ^= (ctx->sub->inverted_fbo_content ? 0 : 1);
4432 if (front_ccw)
4433 glFrontFace(GL_CCW);
4434 else
4435 glFrontFace(GL_CW);
4436 }
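
/* Truth table for the XOR above: the guest's winding is flipped
 * whenever the FBO content is *not* inverted, because rendering upside
 * down mirrors the screen-space winding.
 *
 *    front_ccw | inverted_fbo_content -> glFrontFace
 *        1     |          1             GL_CCW
 *        1     |          0             GL_CW
 *        0     |          1             GL_CW
 *        0     |          0             GL_CCW
 */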
4437
4438 void vrend_update_stencil_state(struct vrend_context *ctx)
4439 {
4440 struct pipe_depth_stencil_alpha_state *state = ctx->sub->dsa;
4441 int i;
4442 if (!state)
4443 return;
4444
4445 if (!state->stencil[1].enabled) {
4446 if (state->stencil[0].enabled) {
4447 vrend_stencil_test_enable(ctx, true);
4448
4449 glStencilOp(translate_stencil_op(state->stencil[0].fail_op),
4450 translate_stencil_op(state->stencil[0].zfail_op),
4451 translate_stencil_op(state->stencil[0].zpass_op));
4452
4453 glStencilFunc(GL_NEVER + state->stencil[0].func,
4454 ctx->sub->stencil_refs[0],
4455 state->stencil[0].valuemask);
4456 glStencilMask(state->stencil[0].writemask);
4457 } else
4458 vrend_stencil_test_enable(ctx, false);
4459 } else {
4460 vrend_stencil_test_enable(ctx, true);
4461
4462 for (i = 0; i < 2; i++) {
4463 GLenum face = (i == 1) ? GL_BACK : GL_FRONT;
4464 glStencilOpSeparate(face,
4465 translate_stencil_op(state->stencil[i].fail_op),
4466 translate_stencil_op(state->stencil[i].zfail_op),
4467 translate_stencil_op(state->stencil[i].zpass_op));
4468
4469 glStencilFuncSeparate(face, GL_NEVER + state->stencil[i].func,
4470 ctx->sub->stencil_refs[i],
4471 state->stencil[i].valuemask);
4472 glStencilMaskSeparate(face, state->stencil[i].writemask);
4473 }
4474 }
4475 ctx->sub->stencil_state_dirty = false;
4476 }
4477
4478 static inline GLenum translate_fill(uint32_t mode)
4479 {
4480 switch (mode) {
4481 case PIPE_POLYGON_MODE_POINT:
4482 return GL_POINT;
4483 case PIPE_POLYGON_MODE_LINE:
4484 return GL_LINE;
4485 case PIPE_POLYGON_MODE_FILL:
4486 return GL_FILL;
4487 default:
4488 assert(0);
4489 return 0;
4490 }
4491 }
4492
4493 static void vrend_hw_emit_rs(struct vrend_context *ctx)
4494 {
4495 struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
4496 int i;
4497
4498 if (vrend_state.use_gles) {
4499 if (!state->depth_clip) {
4500 report_gles_warn(ctx, GLES_WARN_DEPTH_CLIP, 0);
4501 }
4502 } else if (state->depth_clip) {
4503 glDisable(GL_DEPTH_CLAMP);
4504 } else {
4505 glEnable(GL_DEPTH_CLAMP);
4506 }
4507
4508 if (vrend_state.use_gles) {
4509 /* the guest sometimes sends an invalid glPointSize parameter */
4510 if (!state->point_size_per_vertex &&
4511 state->point_size != 1.0f &&
4512 state->point_size != 0.0f) {
4513 report_gles_warn(ctx, GLES_WARN_POINT_SIZE, 0);
4514 }
4515 } else if (state->point_size_per_vertex) {
4516 glEnable(GL_PROGRAM_POINT_SIZE);
4517 } else {
4518 glDisable(GL_PROGRAM_POINT_SIZE);
4519 if (state->point_size) {
4520 glPointSize(state->point_size);
4521 }
4522 }
4523
4524 /* line_width < 0 is invalid, the guest sometimes forgot to set it. */
4525 glLineWidth(state->line_width <= 0 ? 1.0f : state->line_width);
4526
4527 if (state->rasterizer_discard != ctx->sub->hw_rs_state.rasterizer_discard) {
4528 ctx->sub->hw_rs_state.rasterizer_discard = state->rasterizer_discard;
4529 if (state->rasterizer_discard)
4530 glEnable(GL_RASTERIZER_DISCARD);
4531 else
4532 glDisable(GL_RASTERIZER_DISCARD);
4533 }
4534
4535 if (vrend_state.use_gles == true) {
4536 if (translate_fill(state->fill_front) != GL_FILL) {
4537 report_gles_warn(ctx, GLES_WARN_POLYGON_MODE, 0);
4538 }
4539 if (translate_fill(state->fill_back) != GL_FILL) {
4540 report_gles_warn(ctx, GLES_WARN_POLYGON_MODE, 0);
4541 }
4542 } else if (vrend_state.use_core_profile == false) {
4543 glPolygonMode(GL_FRONT, translate_fill(state->fill_front));
4544 glPolygonMode(GL_BACK, translate_fill(state->fill_back));
4545 } else if (state->fill_front == state->fill_back) {
4546 glPolygonMode(GL_FRONT_AND_BACK, translate_fill(state->fill_front));
4547 } else
4548 report_core_warn(ctx, CORE_PROFILE_WARN_POLYGON_MODE, 0);
4549
4550 if (state->offset_tri) {
4551 glEnable(GL_POLYGON_OFFSET_FILL);
4552 } else {
4553 glDisable(GL_POLYGON_OFFSET_FILL);
4554 }
4555
4556 if (vrend_state.use_gles) {
4557 if (state->offset_line) {
4558 report_gles_warn(ctx, GLES_WARN_OFFSET_LINE, 0);
4559 }
4560 } else if (state->offset_line) {
4561 glEnable(GL_POLYGON_OFFSET_LINE);
4562 } else {
4563 glDisable(GL_POLYGON_OFFSET_LINE);
4564 }
4565
4566 if (vrend_state.use_gles) {
4567 if (state->offset_point) {
4568 report_gles_warn(ctx, GLES_WARN_OFFSET_POINT, 0);
4569 }
4570 } else if (state->offset_point) {
4571 glEnable(GL_POLYGON_OFFSET_POINT);
4572 } else {
4573 glDisable(GL_POLYGON_OFFSET_POINT);
4574 }
4575
4576
4577 if (state->flatshade != ctx->sub->hw_rs_state.flatshade) {
4578 ctx->sub->hw_rs_state.flatshade = state->flatshade;
4579 if (vrend_state.use_core_profile == false) {
4580 if (state->flatshade) {
4581 glShadeModel(GL_FLAT);
4582 } else {
4583 glShadeModel(GL_SMOOTH);
4584 }
4585 }
4586 }
4587
4588 if (state->flatshade_first != ctx->sub->hw_rs_state.flatshade_first) {
4589 ctx->sub->hw_rs_state.flatshade_first = state->flatshade_first;
4590 if (vrend_state.use_gles) {
4591 if (state->flatshade_first) {
4592 report_gles_warn(ctx, GLES_WARN_FLATSHADE_FIRST, 0);
4593 }
4594 } else if (state->flatshade_first) {
4595 glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT);
4596 } else {
4597 glProvokingVertexEXT(GL_LAST_VERTEX_CONVENTION_EXT);
4598 }
4599 }
4600
4601 if (!vrend_state.use_gles && has_feature(feat_polygon_offset_clamp))
4602 glPolygonOffsetClampEXT(state->offset_scale, state->offset_units, state->offset_clamp);
4603 else
4604 glPolygonOffset(state->offset_scale, state->offset_units);
4605
4606 if (vrend_state.use_core_profile == false) {
4607 if (state->poly_stipple_enable)
4608 glEnable(GL_POLYGON_STIPPLE);
4609 else
4610 glDisable(GL_POLYGON_STIPPLE);
4611 } else if (state->poly_stipple_enable) {
4612 if (!ctx->pstip_inited)
4613 vrend_init_pstipple_texture(ctx);
4614 }
4615
4616 if (state->point_quad_rasterization) {
4617 if (vrend_state.use_core_profile == false &&
4618 vrend_state.use_gles == false) {
4619 glEnable(GL_POINT_SPRITE);
4620 }
4621
4622 if (vrend_state.use_gles == false) {
4623 glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, state->sprite_coord_mode ? GL_UPPER_LEFT : GL_LOWER_LEFT);
4624 }
4625 } else {
4626 if (vrend_state.use_core_profile == false &&
4627 vrend_state.use_gles == false) {
4628 glDisable(GL_POINT_SPRITE);
4629 }
4630 }
4631
4632 if (state->cull_face != PIPE_FACE_NONE) {
4633 switch (state->cull_face) {
4634 case PIPE_FACE_FRONT:
4635 glCullFace(GL_FRONT);
4636 break;
4637 case PIPE_FACE_BACK:
4638 glCullFace(GL_BACK);
4639 break;
4640 case PIPE_FACE_FRONT_AND_BACK:
4641 glCullFace(GL_FRONT_AND_BACK);
4642 break;
4643 default:
4644 fprintf(stderr, "unhandled cull-face: %x\n", state->cull_face);
4645 }
4646 glEnable(GL_CULL_FACE);
4647 } else
4648 glDisable(GL_CULL_FACE);
4649
4650 /* two sided lighting handled in shader for core profile */
4651 if (vrend_state.use_core_profile == false) {
4652 if (state->light_twoside)
4653 glEnable(GL_VERTEX_PROGRAM_TWO_SIDE);
4654 else
4655 glDisable(GL_VERTEX_PROGRAM_TWO_SIDE);
4656 }
4657
4658 if (state->clip_plane_enable != ctx->sub->hw_rs_state.clip_plane_enable) {
4659 ctx->sub->hw_rs_state.clip_plane_enable = state->clip_plane_enable;
4660 for (i = 0; i < 8; i++) {
4661 if (state->clip_plane_enable & (1 << i))
4662 glEnable(GL_CLIP_PLANE0 + i);
4663 else
4664 glDisable(GL_CLIP_PLANE0 + i);
4665 }
4666 }
4667 if (vrend_state.use_core_profile == false) {
4668 glLineStipple(state->line_stipple_factor, state->line_stipple_pattern);
4669 if (state->line_stipple_enable)
4670 glEnable(GL_LINE_STIPPLE);
4671 else
4672 glDisable(GL_LINE_STIPPLE);
4673 } else if (state->line_stipple_enable) {
4674 if (vrend_state.use_gles)
4675 report_core_warn(ctx, GLES_WARN_STIPPLE, 0);
4676 else
4677 report_core_warn(ctx, CORE_PROFILE_WARN_STIPPLE, 0);
4678 }
4679
4680
4681 if (vrend_state.use_gles) {
4682 if (state->line_smooth) {
4683 report_gles_warn(ctx, GLES_WARN_LINE_SMOOTH, 0);
4684 }
4685 } else if (state->line_smooth) {
4686 glEnable(GL_LINE_SMOOTH);
4687 } else {
4688 glDisable(GL_LINE_SMOOTH);
4689 }
4690
4691 if (vrend_state.use_gles) {
4692 if (state->poly_smooth) {
4693 report_gles_warn(ctx, GLES_WARN_POLY_SMOOTH, 0);
4694 }
4695 } else if (state->poly_smooth) {
4696 glEnable(GL_POLYGON_SMOOTH);
4697 } else {
4698 glDisable(GL_POLYGON_SMOOTH);
4699 }
4700
4701 if (vrend_state.use_core_profile == false) {
4702 if (state->clamp_vertex_color)
4703 glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_TRUE);
4704 else
4705 glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE);
4706
4707 if (state->clamp_fragment_color)
4708 glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_TRUE);
4709 else
4710 glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE);
4711 } else {
4712 if (state->clamp_vertex_color || state->clamp_fragment_color)
4713 report_core_warn(ctx, CORE_PROFILE_WARN_CLAMP, 0);
4714 }
4715
4716 if (has_feature(feat_multisample)) {
4717 if (has_feature(feat_sample_mask)) {
4718 if (state->multisample)
4719 glEnable(GL_SAMPLE_MASK);
4720 else
4721 glDisable(GL_SAMPLE_MASK);
4722 }
4723
4724 /* GLES doesn't have GL_MULTISAMPLE */
4725 if (!vrend_state.use_gles) {
4726 if (state->multisample)
4727 glEnable(GL_MULTISAMPLE);
4728 else
4729 glDisable(GL_MULTISAMPLE);
4730 }
4731
4732 if (has_feature(feat_sample_shading)) {
4733 if (state->force_persample_interp)
4734 glEnable(GL_SAMPLE_SHADING);
4735 else
4736 glDisable(GL_SAMPLE_SHADING);
4737 }
4738 }
4739 }
4740
4741 void vrend_object_bind_rasterizer(struct vrend_context *ctx,
4742 uint32_t handle)
4743 {
4744 struct pipe_rasterizer_state *state;
4745
4746 if (handle == 0) {
4747 memset(&ctx->sub->rs_state, 0, sizeof(ctx->sub->rs_state));
4748 return;
4749 }
4750
4751 state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_RASTERIZER);
4752
4753 if (!state) {
4754 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
4755 return;
4756 }
4757
4758 ctx->sub->rs_state = *state;
4759 ctx->sub->scissor_state_dirty = (1 << 0);
4760 ctx->sub->shader_dirty = true;
4761 vrend_hw_emit_rs(ctx);
4762 }
4763
4764 void vrend_bind_sampler_states(struct vrend_context *ctx,
4765 uint32_t shader_type,
4766 uint32_t start_slot,
4767 uint32_t num_states,
4768 uint32_t *handles)
4769 {
4770 uint32_t i;
4771 struct vrend_sampler_state *state;
4772
4773 if (shader_type >= PIPE_SHADER_TYPES) {
4774 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, shader_type);
4775 return;
4776 }
4777
4778 if (num_states > PIPE_MAX_SAMPLERS ||
4779 start_slot > (PIPE_MAX_SAMPLERS - num_states)) {
4780 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_states);
4781 return;
4782 }
4783
4784 ctx->sub->num_sampler_states[shader_type] = num_states;
4785
4786 for (i = 0; i < num_states; i++) {
4787 if (handles[i] == 0)
4788 state = NULL;
4789 else
4790 state = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_SAMPLER_STATE);
4791
4792 ctx->sub->sampler_state[shader_type][i + start_slot] = state;
4793 }
4794 ctx->sub->sampler_state_dirty = true;
4795 }
4796
4797 static void vrend_apply_sampler_state(struct vrend_context *ctx,
4798 struct vrend_resource *res,
4799 uint32_t shader_type,
4800 int id,
4801 int sampler_id,
4802 uint32_t srgb_decode)
4803 {
4804 struct vrend_texture *tex = (struct vrend_texture *)res;
4805 struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
4806 struct pipe_sampler_state *state = &vstate->base;
4807 bool set_all = false;
4808 GLenum target = tex->base.target;
4809
4810 if (!state) {
4811 fprintf(stderr, "cannot find sampler state for %d %d\n", shader_type, id);
4812 return;
4813 }
4814 if (res->base.nr_samples > 1) {
4815 tex->state = *state;
4816 return;
4817 }
4818
4819 if (tex->base.is_buffer) {
4820 tex->state = *state;
4821 return;
4822 }
4823
4824 /*
4825 * If we emulate alpha format with red, we need to tell
4826 * the sampler to use the red channel and not the alpha one
4827 * by swizzling the GL_TEXTURE_BORDER_COLOR parameter.
4828 */
4829 bool is_emulated_alpha = vrend_format_is_emulated_alpha(res->base.format);
4830 if (has_feature(feat_samplers)) {
4831 if (is_emulated_alpha) {
4832 union pipe_color_union border_color;
4833 border_color = state->border_color;
4834 border_color.ui[0] = border_color.ui[3];
4835 border_color.ui[3] = 0;
4836 glSamplerParameterIuiv(vstate->id, GL_TEXTURE_BORDER_COLOR, border_color.ui);
4837 }
4838 glBindSampler(sampler_id, vstate->id);
4839 if (has_feature(feat_texture_srgb_decode))
4840 glSamplerParameteri(vstate->id, GL_TEXTURE_SRGB_DECODE_EXT,
4841 srgb_decode);
4842 return;
4843 }
4844
4845 if (tex->state.max_lod == -1)
4846 set_all = true;
4847
4848 if (tex->state.wrap_s != state->wrap_s || set_all)
4849 glTexParameteri(target, GL_TEXTURE_WRAP_S, convert_wrap(state->wrap_s));
4850 if (tex->state.wrap_t != state->wrap_t || set_all)
4851 glTexParameteri(target, GL_TEXTURE_WRAP_T, convert_wrap(state->wrap_t));
4852 if (tex->state.wrap_r != state->wrap_r || set_all)
4853 glTexParameteri(target, GL_TEXTURE_WRAP_R, convert_wrap(state->wrap_r));
4854 if (tex->state.min_img_filter != state->min_img_filter ||
4855 tex->state.min_mip_filter != state->min_mip_filter || set_all)
4856 glTexParameterf(target, GL_TEXTURE_MIN_FILTER, convert_min_filter(state->min_img_filter, state->min_mip_filter));
4857 if (tex->state.mag_img_filter != state->mag_img_filter || set_all)
4858 glTexParameterf(target, GL_TEXTURE_MAG_FILTER, convert_mag_filter(state->mag_img_filter));
4859 if (res->target != GL_TEXTURE_RECTANGLE) {
4860 if (tex->state.min_lod != state->min_lod || set_all)
4861 glTexParameterf(target, GL_TEXTURE_MIN_LOD, state->min_lod);
4862 if (tex->state.max_lod != state->max_lod || set_all)
4863 glTexParameterf(target, GL_TEXTURE_MAX_LOD, state->max_lod);
4864 if (tex->state.lod_bias != state->lod_bias || set_all) {
4865 if (vrend_state.use_gles) {
4866 if (state->lod_bias) {
4867 report_gles_warn(ctx, GLES_WARN_LOD_BIAS, 0);
4868 }
4869 } else {
4870 glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
4871 }
4872 }
4873 }
4874
4875 if (tex->state.compare_mode != state->compare_mode || set_all)
4876 glTexParameteri(target, GL_TEXTURE_COMPARE_MODE, state->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE);
4877 if (tex->state.compare_func != state->compare_func || set_all)
4878 glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC, GL_NEVER + state->compare_func);
4879
4880 /*
4881 * Oh this is a fun one. On GLES 2.0 all cubemap MUST NOT be seamless.
4882 * But on GLES 3.0 all cubemaps MUST be seamless. Either way there is no
4883 * way to toggle between the behaviour when running on GLES. And adding
4884 * warnings will spew the logs quite bad. Ignore and hope for the best.
4885 */
4886 if (!vrend_state.use_gles) {
4887 if (state->seamless_cube_map) {
4888 glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
4889 } else {
4890 glDisable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
4891 }
4892 }
4893
4894 if (memcmp(&tex->state.border_color, &state->border_color, 16) || set_all ||
4895 is_emulated_alpha) {
4896 if (is_emulated_alpha) {
4897 union pipe_color_union border_color;
4898 border_color = state->border_color;
4899 border_color.ui[0] = border_color.ui[3];
4900 border_color.ui[3] = 0;
4901 glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, border_color.ui);
4902 } else
4903 glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, state->border_color.ui);
4904 }
4905 tex->state = *state;
4906 }
4907
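/*
 * Map a gallium pipe_texture_target to the matching GL texture target;
 * 2D and 2D-array become multisample targets when nr_samples > 1.
 * Targets with no GL equivalent (PIPE_BUFFER included) fall through and
 * PIPE_BUFFER is returned as a sentinel, not as a usable GL enum.
 */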
static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples)
{
   switch(target) {
   case PIPE_TEXTURE_1D:
      return GL_TEXTURE_1D;
   case PIPE_TEXTURE_2D:
      return (nr_samples > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
   case PIPE_TEXTURE_3D:
      return GL_TEXTURE_3D;
   case PIPE_TEXTURE_RECT:
      return GL_TEXTURE_RECTANGLE_NV;
   case PIPE_TEXTURE_CUBE:
      return GL_TEXTURE_CUBE_MAP;

   case PIPE_TEXTURE_1D_ARRAY:
      return GL_TEXTURE_1D_ARRAY;
   case PIPE_TEXTURE_2D_ARRAY:
      return (nr_samples > 1) ? GL_TEXTURE_2D_MULTISAMPLE_ARRAY : GL_TEXTURE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE_ARRAY:
      return GL_TEXTURE_CUBE_MAP_ARRAY;
   case PIPE_BUFFER:
   default:
      return PIPE_BUFFER;
   }
}

static void vrend_free_sync_thread(void)
{
   if (!vrend_state.sync_thread)
      return;

   pipe_mutex_lock(vrend_state.fence_mutex);
   vrend_state.stop_sync_thread = true;
   pipe_condvar_signal(vrend_state.fence_cond);
   pipe_mutex_unlock(vrend_state.fence_mutex);

   pipe_thread_wait(vrend_state.sync_thread);
   vrend_state.sync_thread = 0;

   pipe_condvar_destroy(vrend_state.fence_cond);
   pipe_mutex_destroy(vrend_state.fence_mutex);
}

#ifdef HAVE_EVENTFD
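/*
 * Write the whole buffer to fd, retrying on EINTR and on short writes.
 * Returns the number of bytes actually written, which is only less than
 * count if a non-EINTR error occurred.
 */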
static ssize_t
write_full(int fd, const void *ptr, size_t count)
{
   const char *buf = ptr;
   ssize_t ret = 0;
   ssize_t total = 0;

   while (count) {
      ret = write(fd, buf, count);
      if (ret < 0) {
         if (errno == EINTR)
            continue;
         break;
      }
      count -= ret;
      buf += ret;
      total += ret;
   }
   return total;
}

static void wait_sync(struct vrend_fence *fence)
{
   GLenum glret;
   ssize_t n;
   uint64_t value = 1;

   do {
      glret = glClientWaitSync(fence->syncobj, 0, 1000000000);

      switch (glret) {
      case GL_WAIT_FAILED:
         fprintf(stderr, "wait sync failed: illegal fence object %p\n", fence->syncobj);
         break;
      case GL_ALREADY_SIGNALED:
      case GL_CONDITION_SATISFIED:
         break;
      default:
         break;
      }
   } while (glret == GL_TIMEOUT_EXPIRED);

   pipe_mutex_lock(vrend_state.fence_mutex);
   list_addtail(&fence->fences, &vrend_state.fence_list);
   pipe_mutex_unlock(vrend_state.fence_mutex);

   n = write_full(vrend_state.eventfd, &value, sizeof(value));
   if (n != sizeof(value)) {
      perror("failed to write to eventfd");
   }
}

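/*
 * Sync thread main loop: pull fences off the wait list, block on their
 * GL sync objects, then move them to the signalled list and poke the
 * eventfd so the main thread can retire them. The fence mutex is
 * dropped around the actual wait so new fences can be queued meanwhile.
 */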
static int thread_sync(UNUSED void *arg)
{
   virgl_gl_context gl_context = vrend_state.sync_context;
   struct vrend_fence *fence, *stor;

   pipe_mutex_lock(vrend_state.fence_mutex);
   vrend_clicbs->make_current(0, gl_context);

   while (!vrend_state.stop_sync_thread) {
      if (LIST_IS_EMPTY(&vrend_state.fence_wait_list) &&
          pipe_condvar_wait(vrend_state.fence_cond, vrend_state.fence_mutex) != 0) {
         fprintf(stderr, "error while waiting on condition\n");
         break;
      }

      LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
         if (vrend_state.stop_sync_thread)
            break;
         list_del(&fence->fences);
         pipe_mutex_unlock(vrend_state.fence_mutex);
         wait_sync(fence);
         pipe_mutex_lock(vrend_state.fence_mutex);
      }
   }

   vrend_clicbs->make_current(0, 0);
   vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
   pipe_mutex_unlock(vrend_state.fence_mutex);
   return 0;
}

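/*
 * Set up the threaded fence path: a shared GL context for the sync
 * thread, an eventfd to signal completed fences, and the worker thread
 * itself. Any failure along the way tears down what was already created
 * and silently falls back to the synchronous path. Setting
 * VIRGL_DISABLE_MT in the environment disables the thread entirely.
 */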
static void vrend_renderer_use_threaded_sync(void)
{
   struct virgl_gl_ctx_param ctx_params;

   if (getenv("VIRGL_DISABLE_MT"))
      return;

   ctx_params.shared = true;
   ctx_params.major_ver = vrend_state.gl_major_ver;
   ctx_params.minor_ver = vrend_state.gl_minor_ver;

   vrend_state.stop_sync_thread = false;

   vrend_state.sync_context = vrend_clicbs->create_gl_context(0, &ctx_params);
   if (vrend_state.sync_context == NULL) {
      fprintf(stderr, "failed to create sync opengl context\n");
      return;
   }

   vrend_state.eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
   if (vrend_state.eventfd == -1) {
      fprintf(stderr, "failed to create eventfd\n");
      vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
      return;
   }

   pipe_condvar_init(vrend_state.fence_cond);
   pipe_mutex_init(vrend_state.fence_mutex);

   vrend_state.sync_thread = pipe_thread_create(thread_sync, NULL);
   if (!vrend_state.sync_thread) {
      close(vrend_state.eventfd);
      vrend_state.eventfd = -1;
      vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
      pipe_condvar_destroy(vrend_state.fence_cond);
      pipe_mutex_destroy(vrend_state.fence_mutex);
   }
}
#else
static void vrend_renderer_use_threaded_sync(void)
{
}
#endif

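/* KHR_debug callback: forward GL errors to stderr, drop everything else. */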
static void vrend_debug_cb(UNUSED GLenum source, GLenum type, UNUSED GLuint id,
                           UNUSED GLenum severity, UNUSED GLsizei length,
                           UNUSED const GLchar* message, UNUSED const void* userParam)
{
   if (type != GL_DEBUG_TYPE_ERROR) {
      return;
   }

   fprintf(stderr, "ERROR: %s\n", message);
}

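/*
 * One-time renderer initialization: create a GL context (trying the
 * highest version first), detect the profile (GLES / core / compat),
 * probe features and the format table, register the object-table
 * destroy callbacks and create the default context 0. Optionally spins
 * up the sync thread when VREND_USE_THREAD_SYNC is requested.
 */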
int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
{
   bool gles;
   int gl_ver;
   virgl_gl_context gl_context;
   struct virgl_gl_ctx_param ctx_params;

   if (!vrend_state.inited) {
      vrend_state.inited = true;
      vrend_object_init_resource_table();
      vrend_clicbs = cbs;
   }

   ctx_params.shared = false;
   for (uint32_t i = 0; i < ARRAY_SIZE(gl_versions); i++) {
      ctx_params.major_ver = gl_versions[i].major;
      ctx_params.minor_ver = gl_versions[i].minor;

      gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
      if (gl_context)
         break;
   }

   vrend_clicbs->make_current(0, gl_context);
   gl_ver = epoxy_gl_version();

   /* enable error output as early as possible */
   if (vrend_use_debug_cb && epoxy_has_gl_extension("GL_KHR_debug")) {
      glDebugMessageCallback(vrend_debug_cb, NULL);
      glEnable(GL_DEBUG_OUTPUT);
      glDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
      set_feature(feat_debug_cb);
   }

   /* make sure you have the latest version of libepoxy */
   gles = epoxy_is_desktop_gl() == 0;

   vrend_state.gl_major_ver = gl_ver / 10;
   vrend_state.gl_minor_ver = gl_ver % 10;

   if (gles) {
      fprintf(stderr, "gl_version %d - es profile enabled\n", gl_ver);
      vrend_state.use_gles = true;
      /* for now, make the rest of the code take the most GLES 3.x-like path */
      vrend_state.use_core_profile = 1;
   } else if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) {
      fprintf(stderr, "gl_version %d - core profile enabled\n", gl_ver);
      vrend_state.use_core_profile = 1;
   } else {
      fprintf(stderr, "gl_version %d - compat profile\n", gl_ver);
   }

   init_features(gles ? 0 : gl_ver,
                 gles ? gl_ver : 0);

   glGetIntegerv(GL_MAX_DRAW_BUFFERS, (GLint *) &vrend_state.max_draw_buffers);

   if (!has_feature(feat_arb_robustness) &&
       !has_feature(feat_gles_khr_robustness)) {
      fprintf(stderr, "WARNING: running without ARB/KHR robustness in place may crash\n");
   }

   /* callbacks for when we are cleaning up the object table */
   vrend_resource_set_destroy_callback(vrend_destroy_resource_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object);
   vrend_object_set_destroy_callback(VIRGL_OBJECT_VERTEX_ELEMENTS, vrend_destroy_vertex_elements_object);

   /* disable debug output during format testing, it spews a lot of errors */
   if (has_feature(feat_debug_cb)) {
      glDisable(GL_DEBUG_OUTPUT);
   }

   vrend_build_format_list_common();

   if (vrend_state.use_gles) {
      vrend_build_format_list_gles();
   } else {
      vrend_build_format_list_gl();
   }

   vrend_check_texture_storage(tex_conv_table);

   /* re-enable debug output now that format testing is done */
   if (has_feature(feat_debug_cb)) {
      glEnable(GL_DEBUG_OUTPUT);
   }

   vrend_clicbs->destroy_gl_context(gl_context);
   list_inithead(&vrend_state.fence_list);
   list_inithead(&vrend_state.fence_wait_list);
   list_inithead(&vrend_state.waiting_query_list);
   list_inithead(&vrend_state.active_ctx_list);
   /* create 0 context */
   vrend_renderer_context_create_internal(0, 0, NULL);

   vrend_state.eventfd = -1;
   if (flags & VREND_USE_THREAD_SYNC) {
      vrend_renderer_use_threaded_sync();
   }

   return 0;
}

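/* Tear down global renderer state; a no-op when never initialized. */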
void
vrend_renderer_fini(void)
{
   if (!vrend_state.inited)
      return;

   vrend_free_sync_thread();
   if (vrend_state.eventfd != -1) {
      close(vrend_state.eventfd);
      vrend_state.eventfd = -1;
   }

   vrend_decode_reset(false);
   vrend_object_fini_resource_table();
   vrend_decode_reset(true);

   vrend_state.current_ctx = NULL;
   vrend_state.current_hw_ctx = NULL;
   vrend_state.inited = false;
}

static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
{
   int i, j;
   struct vrend_streamout_object *obj, *tmp;

   if (sub->fb_id)
      glDeleteFramebuffers(1, &sub->fb_id);

   if (sub->blit_fb_ids[0])
      glDeleteFramebuffers(2, sub->blit_fb_ids);

   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

   if (!has_feature(feat_gles31_vertex_attrib_binding)) {
      while (sub->enabled_attribs_bitmask) {
         i = u_bit_scan(&sub->enabled_attribs_bitmask);

         glDisableVertexAttribArray(i);
      }
      glDeleteVertexArrays(1, &sub->vaoid);
   }

   glBindVertexArray(0);

   if (sub->current_so)
      glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);

   LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub->streamout_list, head) {
      vrend_destroy_streamout_object(obj);
   }

   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_VERTEX], NULL);
   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_FRAGMENT], NULL);
   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_GEOMETRY], NULL);
   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_TESS_CTRL], NULL);
   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_TESS_EVAL], NULL);
   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_COMPUTE], NULL);

   vrend_free_programs(sub);
   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      free(sub->consts[i].consts);
      sub->consts[i].consts = NULL;

      for (j = 0; j < PIPE_MAX_SHADER_SAMPLER_VIEWS; j++) {
         vrend_sampler_view_reference(&sub->views[i].views[j], NULL);
      }
   }

   if (sub->zsurf)
      vrend_surface_reference(&sub->zsurf, NULL);

   for (i = 0; i < sub->nr_cbufs; i++) {
      if (!sub->surf[i])
         continue;
      vrend_surface_reference(&sub->surf[i], NULL);
   }

   vrend_resource_reference((struct vrend_resource **)&sub->ib.buffer, NULL);

   vrend_object_fini_ctx_table(sub->object_hash);
   vrend_clicbs->destroy_gl_context(sub->gl_context);

   list_del(&sub->head);
   FREE(sub);
}

bool vrend_destroy_context(struct vrend_context *ctx)
{
   bool switch_0 = (ctx == vrend_state.current_ctx);
   struct vrend_context *cur = vrend_state.current_ctx;
   struct vrend_sub_context *sub, *tmp;
   if (switch_0) {
      vrend_state.current_ctx = NULL;
      vrend_state.current_hw_ctx = NULL;
   }

   if (vrend_state.use_core_profile) {
      if (ctx->pstip_inited)
         glDeleteTextures(1, &ctx->pstipple_tex_id);
      ctx->pstip_inited = false;
   }
   /* reset references on framebuffers */
   vrend_set_framebuffer_state(ctx, 0, NULL, 0);

   vrend_set_num_sampler_views(ctx, PIPE_SHADER_VERTEX, 0, 0);
   vrend_set_num_sampler_views(ctx, PIPE_SHADER_FRAGMENT, 0, 0);
   vrend_set_num_sampler_views(ctx, PIPE_SHADER_GEOMETRY, 0, 0);
   vrend_set_num_sampler_views(ctx, PIPE_SHADER_TESS_CTRL, 0, 0);
   vrend_set_num_sampler_views(ctx, PIPE_SHADER_TESS_EVAL, 0, 0);
   vrend_set_num_sampler_views(ctx, PIPE_SHADER_COMPUTE, 0, 0);

   vrend_set_streamout_targets(ctx, 0, 0, NULL);
   vrend_set_num_vbo(ctx, 0);

   vrend_set_index_buffer(ctx, 0, 0, 0);

   vrend_renderer_force_ctx_0();
   LIST_FOR_EACH_ENTRY_SAFE(sub, tmp, &ctx->sub_ctxs, head)
      vrend_destroy_sub_context(sub);

   vrend_object_fini_ctx_table(ctx->res_hash);

   list_del(&ctx->ctx_entry);

   FREE(ctx);

   if (!switch_0 && cur)
      vrend_hw_switch_context(cur, true);

   return switch_0;
}

struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *debug_name)
{
   struct vrend_context *grctx = CALLOC_STRUCT(vrend_context);

   if (!grctx)
      return NULL;

   if (nlen && debug_name) {
      /* grctx is zeroed by CALLOC_STRUCT, so copying one byte less than
       * the array size keeps the name NUL-terminated */
      strncpy(grctx->debug_name, debug_name, sizeof(grctx->debug_name) - 1);
   }

   grctx->ctx_id = id;

   list_inithead(&grctx->sub_ctxs);
   list_inithead(&grctx->active_nontimer_query_list);

   grctx->res_hash = vrend_object_init_ctx_table();

   grctx->shader_cfg.use_gles = vrend_state.use_gles;
   grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
   grctx->shader_cfg.use_explicit_locations = vrend_state.use_explicit_locations;
   grctx->shader_cfg.max_draw_buffers = vrend_state.max_draw_buffers;
   vrend_renderer_create_sub_ctx(grctx, 0);
   vrend_renderer_set_sub_ctx(grctx, 0);

   vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);

   list_addtail(&grctx->ctx_entry, &vrend_state.active_ctx_list);
   return grctx;
}

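/*
 * Attach guest memory (an iovec array) to a resource. The array is not
 * copied or freed here; detach hands it back to the caller. A resource
 * that already has iovecs attached keeps them.
 */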
int vrend_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
                                       int num_iovs)
{
   struct vrend_resource *res;

   res = vrend_resource_lookup(res_handle, 0);
   if (!res)
      return EINVAL;

   if (res->iov)
      return 0;

   /* work out size and max resource size */
   res->iov = iov;
   res->num_iovs = num_iovs;
   return 0;
}

void vrend_renderer_resource_detach_iov(int res_handle,
                                        struct iovec **iov_p,
                                        int *num_iovs_p)
{
   struct vrend_resource *res;
   res = vrend_resource_lookup(res_handle, 0);
   if (!res) {
      return;
   }
   if (iov_p)
      *iov_p = res->iov;
   if (num_iovs_p)
      *num_iovs_p = res->num_iovs;

   res->iov = NULL;
   res->num_iovs = 0;
}

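/*
 * Validate resource-creation arguments from the guest before touching
 * GL: reject handle 0, out-of-range targets and formats, multisample or
 * mipmap combinations the target cannot have, bad array sizes, and bind
 * flags that do not match the target. Returns 0 on success, -1 on error.
 */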
static int check_resource_valid(struct vrend_renderer_resource_create_args *args)
{
   /* do not accept handle 0 */
   if (args->handle == 0)
      return -1;

   /* limit the target */
   if (args->target >= PIPE_MAX_TEXTURE_TYPES)
      return -1;

   if (args->format >= VIRGL_FORMAT_MAX)
      return -1;

   /* only texture 2d and 2d array can have multiple samples */
   if (args->nr_samples > 1) {
      if (!has_feature(feat_texture_multisample))
         return -1;

      if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_2D_ARRAY)
         return -1;
      /* multisample can't have miplevels */
      if (args->last_level > 0)
         return -1;
   }

   if (args->last_level > 0) {
      /* buffer and rect textures can't have mipmaps */
      if (args->target == PIPE_BUFFER || args->target == PIPE_TEXTURE_RECT)
         return -1;
      if (args->last_level > (floor(log2(MAX2(args->width, args->height))) + 1))
         return -1;
   }
   if (args->flags != 0 && args->flags != VIRGL_RESOURCE_Y_0_TOP)
      return -1;

   if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
      if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_RECT)
         return -1;

   /* array size for array textures only */
   if (args->target == PIPE_TEXTURE_CUBE) {
      if (args->array_size != 6)
         return -1;
   } else if (args->target == PIPE_TEXTURE_CUBE_ARRAY) {
      if (!has_feature(feat_cube_map_array))
         return -1;
      if (args->array_size % 6)
         return -1;
   } else if (args->array_size > 1) {
      if (args->target != PIPE_TEXTURE_2D_ARRAY &&
          args->target != PIPE_TEXTURE_1D_ARRAY)
         return -1;

      if (!has_feature(feat_texture_array))
         return -1;
   }

   if (args->bind == 0 ||
       args->bind == VIRGL_BIND_CUSTOM ||
       args->bind == VIRGL_BIND_INDEX_BUFFER ||
       args->bind == VIRGL_BIND_STREAM_OUTPUT ||
       args->bind == VIRGL_BIND_VERTEX_BUFFER ||
       args->bind == VIRGL_BIND_CONSTANT_BUFFER ||
       args->bind == VIRGL_BIND_SHADER_BUFFER) {
      if (args->target != PIPE_BUFFER)
         return -1;
      if (args->height != 1 || args->depth != 1)
         return -1;
   } else {
      if (!((args->bind & VIRGL_BIND_SAMPLER_VIEW) ||
            (args->bind & VIRGL_BIND_DEPTH_STENCIL) ||
            (args->bind & VIRGL_BIND_RENDER_TARGET) ||
            (args->bind & VIRGL_BIND_CURSOR)))
         return -1;

      if (args->target == PIPE_TEXTURE_2D ||
          args->target == PIPE_TEXTURE_RECT ||
          args->target == PIPE_TEXTURE_CUBE ||
          args->target == PIPE_TEXTURE_2D_ARRAY ||
          args->target == PIPE_TEXTURE_CUBE_ARRAY) {
         if (args->depth != 1)
            return -1;
      }
      if (args->target == PIPE_TEXTURE_1D ||
          args->target == PIPE_TEXTURE_1D_ARRAY) {
         if (args->height != 1 || args->depth != 1)
            return -1;
      }
   }
   return 0;
}

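/* Allocate GL storage for a buffer resource; gr->target must already be set. */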
static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width)
{
   glGenBuffersARB(1, &gr->id);
   glBindBufferARB(gr->target, gr->id);
   glBufferData(gr->target, width, NULL, GL_STREAM_DRAW);
   gr->is_buffer = true;
}

static inline void
vrend_renderer_resource_copy_args(struct vrend_renderer_resource_create_args *args,
                                  struct vrend_resource *gr)
{
   assert(gr);
   assert(args);

   gr->handle = args->handle;
   gr->base.width0 = args->width;
   gr->base.height0 = args->height;
   gr->base.depth0 = args->depth;
   gr->base.format = args->format;
   gr->base.target = args->target;
   gr->base.last_level = args->last_level;
   gr->base.nr_samples = args->nr_samples;
   gr->base.array_size = args->array_size;
}

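/*
 * Allocate GL texture storage for a resource, applying the GLES
 * target workarounds (no rectangle or 1D textures) and preferring
 * immutable storage (glTexStorage*) when the format supports it.
 */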
static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
                                                    void *image_oes)
{
   uint level;
   GLenum internalformat, glformat, gltype;
   struct vrend_texture *gt = (struct vrend_texture *)gr;
   struct pipe_resource *pr = &gr->base;
   assert(pr->width0 > 0);

   bool format_can_texture_storage = has_feature(feat_texture_storage) &&
         (tex_conv_table[pr->format].bindings & VIRGL_BIND_CAN_TEXTURE_STORAGE);

   gr->target = tgsitargettogltarget(pr->target, pr->nr_samples);

   /* ugly workaround for texture rectangle missing on GLES */
   if (vrend_state.use_gles && gr->target == GL_TEXTURE_RECTANGLE_NV) {
      /* a 1x1 texture is the only rect usage for some guests, so don't warn for those */
      if (pr->width0 != 1 || pr->height0 != 1) {
         report_gles_warn(NULL, GLES_WARN_TEXTURE_RECT, 0);
      }
      gr->target = GL_TEXTURE_2D;
   }

   /* fallback for 1D textures */
   if (vrend_state.use_gles && gr->target == GL_TEXTURE_1D) {
      gr->target = GL_TEXTURE_2D;
   }

   /* fallback for 1D array textures */
   if (vrend_state.use_gles && gr->target == GL_TEXTURE_1D_ARRAY) {
      gr->target = GL_TEXTURE_2D_ARRAY;
   }

   glGenTextures(1, &gr->id);
   glBindTexture(gr->target, gr->id);

   internalformat = tex_conv_table[pr->format].internalformat;
   glformat = tex_conv_table[pr->format].glformat;
   gltype = tex_conv_table[pr->format].gltype;

   if (internalformat == 0) {
      fprintf(stderr, "unknown format is %d\n", pr->format);
      FREE(gt);
      return EINVAL;
   }

   if (image_oes) {
      if (epoxy_has_gl_extension("GL_OES_EGL_image_external")) {
         glEGLImageTargetTexture2DOES(gr->target, (GLeglImageOES) image_oes);
      } else {
         fprintf(stderr, "missing GL_OES_EGL_image_external extension\n");
         FREE(gr);
         return EINVAL;
      }
   } else if (pr->nr_samples > 1) {
      if (vrend_state.use_gles || has_feature(feat_texture_storage)) {
         if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) {
            glTexStorage2DMultisample(gr->target, pr->nr_samples,
                                      internalformat, pr->width0, pr->height0,
                                      GL_TRUE);
         } else {
            glTexStorage3DMultisample(gr->target, pr->nr_samples,
                                      internalformat, pr->width0, pr->height0, pr->array_size,
                                      GL_TRUE);
         }
      } else {
         if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) {
            glTexImage2DMultisample(gr->target, pr->nr_samples,
                                    internalformat, pr->width0, pr->height0,
                                    GL_TRUE);
         } else {
            glTexImage3DMultisample(gr->target, pr->nr_samples,
                                    internalformat, pr->width0, pr->height0, pr->array_size,
                                    GL_TRUE);
         }
      }
   } else if (gr->target == GL_TEXTURE_CUBE_MAP) {
      int i;
      if (format_can_texture_storage)
         glTexStorage2D(GL_TEXTURE_CUBE_MAP, pr->last_level + 1, internalformat, pr->width0, pr->height0);
      else {
         for (i = 0; i < 6; i++) {
            GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
            for (level = 0; level <= pr->last_level; level++) {
               unsigned mwidth = u_minify(pr->width0, level);
               unsigned mheight = u_minify(pr->height0, level);

               glTexImage2D(ctarget, level, internalformat, mwidth, mheight, 0, glformat,
                            gltype, NULL);
            }
         }
      }
   } else if (gr->target == GL_TEXTURE_3D ||
              gr->target == GL_TEXTURE_2D_ARRAY ||
              gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      if (format_can_texture_storage) {
         unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ?
                                   pr->array_size : pr->depth0;
         glTexStorage3D(gr->target, pr->last_level + 1, internalformat, pr->width0, pr->height0, depth_param);
      } else {
         for (level = 0; level <= pr->last_level; level++) {
            unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ?
                                      pr->array_size : u_minify(pr->depth0, level);
            unsigned mwidth = u_minify(pr->width0, level);
            unsigned mheight = u_minify(pr->height0, level);
            glTexImage3D(gr->target, level, internalformat, mwidth, mheight,
                         depth_param, 0, glformat, gltype, NULL);
         }
      }
   } else if (gr->target == GL_TEXTURE_1D && vrend_state.use_gles) {
      report_gles_missing_func(NULL, "glTexImage1D");
   } else if (gr->target == GL_TEXTURE_1D) {
      if (format_can_texture_storage) {
         glTexStorage1D(gr->target, pr->last_level + 1, internalformat, pr->width0);
      } else {
         for (level = 0; level <= pr->last_level; level++) {
            unsigned mwidth = u_minify(pr->width0, level);
            glTexImage1D(gr->target, level, internalformat, mwidth, 0,
                         glformat, gltype, NULL);
         }
      }
   } else {
      if (format_can_texture_storage)
         glTexStorage2D(gr->target, pr->last_level + 1, internalformat, pr->width0,
                        gr->target == GL_TEXTURE_1D_ARRAY ? pr->array_size : pr->height0);
      else {
         for (level = 0; level <= pr->last_level; level++) {
            unsigned mwidth = u_minify(pr->width0, level);
            unsigned mheight = u_minify(pr->height0, level);
            glTexImage2D(gr->target, level, internalformat, mwidth,
                         gr->target == GL_TEXTURE_1D_ARRAY ? pr->array_size : mheight,
                         0, glformat, gltype, NULL);
         }
      }
   }

   if (!format_can_texture_storage) {
      glTexParameteri(gr->target, GL_TEXTURE_BASE_LEVEL, 0);
      glTexParameteri(gr->target, GL_TEXTURE_MAX_LEVEL, pr->last_level);
   }

   gt->state.max_lod = -1;
   return 0;
}

int vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, struct iovec *iov, uint32_t num_iovs, void *image_oes)
{
   struct vrend_resource *gr;
   int ret;

   ret = check_resource_valid(args);
   if (ret)
      return EINVAL;

   gr = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
   if (!gr)
      return ENOMEM;

   vrend_renderer_resource_copy_args(args, gr);
   gr->iov = iov;
   gr->num_iovs = num_iovs;

   if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
      gr->y_0_top = true;

   pipe_reference_init(&gr->base.reference, 1);

   if (args->bind == VIRGL_BIND_CUSTOM) {
      /* custom should only be for buffers */
      gr->ptr = malloc(args->width);
      if (!gr->ptr) {
         FREE(gr);
         return ENOMEM;
      }
   } else if (args->bind == VIRGL_BIND_INDEX_BUFFER) {
      gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, args->width);
   } else if (args->bind == VIRGL_BIND_STREAM_OUTPUT) {
      gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
      vrend_create_buffer(gr, args->width);
   } else if (args->bind == VIRGL_BIND_VERTEX_BUFFER) {
      gr->target = GL_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, args->width);
   } else if (args->bind == VIRGL_BIND_CONSTANT_BUFFER) {
      gr->target = GL_UNIFORM_BUFFER;
      vrend_create_buffer(gr, args->width);
   } else if (args->target == PIPE_BUFFER && (args->bind == 0 || args->bind == VIRGL_BIND_SHADER_BUFFER)) {
      gr->target = GL_ARRAY_BUFFER_ARB;
      vrend_create_buffer(gr, args->width);
   } else if (args->target == PIPE_BUFFER && (args->bind & VIRGL_BIND_SAMPLER_VIEW)) {
      /*
       * On desktop GL we use GL_ARB_texture_buffer_object, on GLES we use
       * GL_EXT_texture_buffer (it is in the ANDROID extension pack).
       */
#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
#endif

      /* need to check GL version here */
      if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
         gr->target = GL_TEXTURE_BUFFER;
      } else {
         gr->target = GL_PIXEL_PACK_BUFFER_ARB;
      }
      vrend_create_buffer(gr, args->width);
   } else {
      int r = vrend_renderer_resource_allocate_texture(gr, image_oes);
      if (r)
         return r;
   }

   ret = vrend_resource_insert(gr, args->handle);
   if (ret == 0) {
      vrend_renderer_resource_destroy(gr, true);
      return ENOMEM;
   }
   return 0;
}

void vrend_renderer_resource_destroy(struct vrend_resource *res, bool remove)
{
   if (res->readback_fb_id)
      glDeleteFramebuffers(1, &res->readback_fb_id);

   if (res->ptr)
      free(res->ptr);
   if (res->id) {
      if (res->is_buffer) {
         glDeleteBuffers(1, &res->id);
         if (res->tbo_tex_id)
            glDeleteTextures(1, &res->tbo_tex_id);
      } else
         glDeleteTextures(1, &res->id);
   }

   if (res->handle && remove)
      vrend_resource_remove(res->handle);
   free(res);
}

static void vrend_destroy_resource_object(void *obj_ptr)
{
   struct vrend_resource *res = obj_ptr;

   if (pipe_reference(&res->base.reference, NULL))
      vrend_renderer_resource_destroy(res, false);
}

void vrend_renderer_resource_unref(uint32_t res_handle)
{
   struct vrend_resource *res;
   struct vrend_context *ctx;

   res = vrend_resource_lookup(res_handle, 0);
   if (!res)
      return;

   /* detach the resource from every context it is still attached to */
   LIST_FOR_EACH_ENTRY(ctx, &vrend_state.active_ctx_list, ctx_entry) {
      vrend_renderer_detach_res_ctx_p(ctx, res->handle);
   }

   vrend_resource_remove(res->handle);
}

static int use_sub_data = 0;
struct virgl_sub_upload_data {
   GLenum target;
   struct pipe_box *box;
};

static void iov_buffer_upload(void *cookie, uint32_t doff, void *src, int len)
{
   struct virgl_sub_upload_data *d = cookie;
   glBufferSubData(d->target, d->box->x + doff, len, src);
}

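/*
 * Rescale 24-bit depth values stored in the top 24 bits of each 32-bit
 * word: d = (value >> 8) / 0xffffff is the normalized depth, which is
 * multiplied by scale_val, clamped to [0, 1] and packed back; the low
 * 8 bits come back as zero. Used to convert between guest Z24 values
 * and the host's 32-bit scaled depth interpretation.
 */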
static void vrend_scale_depth(void *ptr, int size, float scale_val)
{
   GLuint *ival = ptr;
   const GLfloat myscale = 1.0f / 0xffffff;
   int i;
   for (i = 0; i < size / 4; i++) {
      GLuint value = ival[i];
      GLfloat d = ((float)(value >> 8) * myscale) * scale_val;
      d = CLAMP(d, 0.0F, 1.0F);
      ival[i] = (int)(d / myscale) << 8;
   }
}

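/*
 * Gather a box worth of texel data from the guest iovecs into a linear
 * staging buffer. A single copy suffices when the data is already
 * contiguous; otherwise rows are copied one at a time, honouring the
 * guest stride and, if requested, flipping the image vertically.
 */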
static void read_transfer_data(struct pipe_resource *res,
                               struct iovec *iov,
                               unsigned int num_iovs,
                               char *data,
                               uint32_t src_stride,
                               struct pipe_box *box,
                               uint32_t level,
                               uint64_t offset,
                               bool invert)
{
   int blsize = util_format_get_blocksize(res->format);
   uint32_t size = vrend_get_iovec_size(iov, num_iovs);
   uint32_t send_size = util_format_get_nblocks(res->format, box->width,
                                                box->height) * blsize * box->depth;
   uint32_t bwx = util_format_get_nblocksx(res->format, box->width) * blsize;
   int32_t bh = util_format_get_nblocksy(res->format, box->height);
   int d, h;

   if ((send_size == size || bh == 1) && !invert && box->depth == 1)
      vrend_read_from_iovec(iov, num_iovs, offset, data, send_size);
   else {
      if (invert) {
         for (d = 0; d < box->depth; d++) {
            uint32_t myoffset = offset + d * src_stride * u_minify(res->height0, level);
            for (h = bh - 1; h >= 0; h--) {
               void *ptr = data + (h * bwx) + d * (bh * bwx);
               vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
               myoffset += src_stride;
            }
         }
      } else {
         for (d = 0; d < box->depth; d++) {
            uint32_t myoffset = offset + d * src_stride * u_minify(res->height0, level);
            for (h = 0; h < bh; h++) {
               void *ptr = data + (h * bwx) + d * (bh * bwx);
               vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
               myoffset += src_stride;
            }
         }
      }
   }
}

static void write_transfer_data(struct pipe_resource *res,
                                struct iovec *iov,
                                unsigned num_iovs,
                                char *data,
                                uint32_t dst_stride,
                                struct pipe_box *box,
                                uint32_t level,
                                uint64_t offset,
                                bool invert)
{
   int blsize = util_format_get_blocksize(res->format);
   uint32_t size = vrend_get_iovec_size(iov, num_iovs);
   uint32_t send_size = util_format_get_nblocks(res->format, box->width,
                                                box->height) * blsize * box->depth;
   uint32_t bwx = util_format_get_nblocksx(res->format, box->width) * blsize;
   int32_t bh = util_format_get_nblocksy(res->format, box->height);
   int d, h;
   uint32_t stride = dst_stride ? dst_stride : util_format_get_nblocksx(res->format, u_minify(res->width0, level)) * blsize;

   if ((send_size == size || bh == 1) && !invert && box->depth == 1) {
      vrend_write_to_iovec(iov, num_iovs, offset, data, send_size);
   } else if (invert) {
      for (d = 0; d < box->depth; d++) {
         uint32_t myoffset = offset + d * stride * u_minify(res->height0, level);
         for (h = bh - 1; h >= 0; h--) {
            void *ptr = data + (h * bwx) + d * (bh * bwx);
            vrend_write_to_iovec(iov, num_iovs, myoffset, ptr, bwx);
            myoffset += stride;
         }
      }
   } else {
      for (d = 0; d < box->depth; d++) {
         uint32_t myoffset = offset + d * stride * u_minify(res->height0, level);
         for (h = 0; h < bh; h++) {
            void *ptr = data + (h * bwx) + d * (bh * bwx);
            vrend_write_to_iovec(iov, num_iovs, myoffset, ptr, bwx);
            myoffset += stride;
         }
      }
   }
}

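/* Check that a transfer box lies within the given mip level of the resource. */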
static bool check_transfer_bounds(struct vrend_resource *res,
                                  const struct vrend_transfer_info *info)
{
   int lwidth, lheight;

   /* check mipmap level is in bounds */
   if (info->level > res->base.last_level)
      return false;
   if (info->box->x < 0 || info->box->y < 0)
      return false;
   /* these will catch bad y/z/w/d with 1D textures etc */
   lwidth = u_minify(res->base.width0, info->level);
   if (info->box->width > lwidth)
      return false;
   if (info->box->x > lwidth)
      return false;
   if (info->box->width + info->box->x > lwidth)
      return false;

   lheight = u_minify(res->base.height0, info->level);
   if (info->box->height > lheight)
      return false;
   if (info->box->y > lheight)
      return false;
   if (info->box->height + info->box->y > lheight)
      return false;

   if (res->base.target == PIPE_TEXTURE_3D) {
      int ldepth = u_minify(res->base.depth0, info->level);
      if (info->box->depth > ldepth)
         return false;
      if (info->box->z > ldepth)
         return false;
      if (info->box->z + info->box->depth > ldepth)
         return false;
   } else {
      if (info->box->depth > (int)res->base.array_size)
         return false;
      if (info->box->z > (int)res->base.array_size)
         return false;
      if (info->box->z + info->box->depth > (int)res->base.array_size)
         return false;
   }

   return true;
}

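/*
 * Check that the guest iovecs are large enough for the transfer: the
 * required size is layer_stride * depth, where the strides either come
 * from the transfer info (and must not be smaller than the box demands)
 * or are derived from the box dimensions and format block size.
 */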
static bool check_iov_bounds(struct vrend_resource *res,
                             const struct vrend_transfer_info *info,
                             struct iovec *iov, int num_iovs)
{
   GLuint send_size;
   GLuint iovsize = vrend_get_iovec_size(iov, num_iovs);
   GLuint valid_stride, valid_layer_stride;

   /* validate the send size */
   valid_stride = util_format_get_stride(res->base.format, info->box->width);
   if (info->stride) {
      /* only validate the passed-in stride for boxes with height */
      if (info->box->height > 1) {
         if (info->stride < valid_stride)
            return false;
         valid_stride = info->stride;
      }
   }

   valid_layer_stride = util_format_get_2d_size(res->base.format, valid_stride,
                                                info->box->height);

   /* layer stride only makes sense for 3D, cube and array textures */
   if (info->layer_stride) {
      if ((res->base.target != PIPE_TEXTURE_3D &&
           res->base.target != PIPE_TEXTURE_CUBE &&
           res->base.target != PIPE_TEXTURE_1D_ARRAY &&
           res->base.target != PIPE_TEXTURE_2D_ARRAY &&
           res->base.target != PIPE_TEXTURE_CUBE_ARRAY))
         return false;

      /* only validate the passed-in layer_stride for boxes with depth */
      if (info->box->depth > 1) {
         if (info->layer_stride < valid_layer_stride)
            return false;
         valid_layer_stride = info->layer_stride;
      }
   }

   send_size = valid_layer_stride * info->box->depth;
   if (iovsize < info->offset)
      return false;
   if (iovsize < send_size)
      return false;
   if (iovsize < info->offset + send_size)
      return false;

   return true;
}

static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
                                             struct vrend_resource *res,
                                             struct iovec *iov, int num_iovs,
                                             const struct vrend_transfer_info *info)
{
   void *data;

   if (res->target == 0 && res->ptr) {
      vrend_read_from_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, info->box->width);
      return 0;
   }
   if (res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
       res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
       res->target == GL_ARRAY_BUFFER_ARB ||
       res->target == GL_TEXTURE_BUFFER ||
       res->target == GL_UNIFORM_BUFFER) {
      struct virgl_sub_upload_data d;
      d.box = info->box;
      d.target = res->target;

      glBindBufferARB(res->target, res->id);
      if (use_sub_data == 1) {
         vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
      } else {
         data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_WRITE_BIT);
         if (data == NULL) {
            fprintf(stderr, "map failed for element buffer\n");
            vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
         } else {
            vrend_read_from_iovec(iov, num_iovs, info->offset, data, info->box->width);
            glUnmapBuffer(res->target);
         }
      }
   } else {
      GLenum glformat;
      GLenum gltype;
      int need_temp = 0;
      int elsize = util_format_get_blocksize(res->base.format);
      int x = 0, y = 0;
      bool compressed;
      bool invert = false;
      float depth_scale;
      GLuint send_size = 0;
      uint32_t stride = info->stride;

      vrend_use_program(ctx, 0);

      if (!stride)
         stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)) * elsize;

      compressed = util_format_is_compressed(res->base.format);
      if (num_iovs > 1 || compressed) {
         need_temp = true;
      }

      if (vrend_state.use_core_profile && (res->y_0_top || (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM))) {
         need_temp = true;
         if (res->y_0_top)
            invert = true;
      }

      if (need_temp) {
         send_size = util_format_get_nblocks(res->base.format, info->box->width,
                                             info->box->height) * elsize * info->box->depth;
         data = malloc(send_size);
         if (!data)
            return ENOMEM;
         read_transfer_data(&res->base, iov, num_iovs, data, stride,
                            info->box, info->level, info->offset, invert);
      } else {
         data = (char*)iov[0].iov_base + info->offset;
      }

      if (stride && !need_temp) {
         glPixelStorei(GL_UNPACK_ROW_LENGTH, stride / elsize);
         glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, u_minify(res->base.height0, info->level));
      } else
         glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);

      switch (elsize) {
      case 1:
      case 3:
         glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
         break;
      case 2:
      case 6:
         glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
         break;
      case 8:
         glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
         break;
      case 4:
      default:
         glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
         break;
      }

      glformat = tex_conv_table[res->base.format].glformat;
      gltype = tex_conv_table[res->base.format].gltype;

      if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
         GLuint buffers;

         if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level) {
            GLuint fb_id;
            if (res->readback_fb_id)
               glDeleteFramebuffers(1, &res->readback_fb_id);

            glGenFramebuffers(1, &fb_id);
            glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_id);
            vrend_fb_bind_texture(res, 0, info->level, 0);

            res->readback_fb_id = fb_id;
            res->readback_fb_level = info->level;
         } else {
            glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, res->readback_fb_id);
         }

         buffers = GL_COLOR_ATTACHMENT0_EXT;
         glDrawBuffers(1, &buffers);
         vrend_blend_enable(ctx, false);
         vrend_depth_test_enable(ctx, false);
         vrend_alpha_test_enable(ctx, false);
         vrend_stencil_test_enable(ctx, false);
         glPixelZoom(1.0f, res->y_0_top ? -1.0f : 1.0f);
         glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y);
         glDrawPixels(info->box->width, info->box->height, glformat, gltype,
                      data);
      } else {
         uint32_t comp_size;
         glBindTexture(res->target, res->id);

         if (compressed) {
            glformat = tex_conv_table[res->base.format].internalformat;
            comp_size = util_format_get_nblocks(res->base.format, info->box->width,
                                                info->box->height) * util_format_get_blocksize(res->base.format);
         }

         if (glformat == 0) {
            glformat = GL_BGRA;
            gltype = GL_UNSIGNED_BYTE;
         }

         x = info->box->x;
         y = invert ? (int)res->base.height0 - info->box->y - info->box->height : info->box->y;

         /* Mipmaps are usually passed in one iov, and we need to keep the
          * offset into the data in case we want to read back the data of
          * a surface that cannot be rendered. Since we cannot assume that
          * the whole texture is filled, we evaluate the offset for origin
          * (0,0,0). A resource may also be reused and resized, so update
          * the offset every time.
          */
         if (info->level < VR_MAX_TEXTURE_2D_LEVELS) {
            int64_t level_height = u_minify(res->base.height0, info->level);
            res->mipmap_offsets[info->level] = info->offset -
               ((info->box->z * level_height + y) * stride + x * elsize);
         }

         if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
            /* we get values from the guest as 24-bit scaled integers
               but we give them to the host GL and it interprets them
               as 32-bit scaled integers, so we need to scale them here */
            depth_scale = 256.0;
            if (!vrend_state.use_core_profile)
               glPixelTransferf(GL_DEPTH_SCALE, depth_scale);
            else
               vrend_scale_depth(data, send_size, depth_scale);
         }
         if (res->target == GL_TEXTURE_CUBE_MAP) {
            GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
            if (compressed) {
               glCompressedTexSubImage2D(ctarget, info->level, x, y,
                                         info->box->width, info->box->height,
                                         glformat, comp_size, data);
            } else {
               glTexSubImage2D(ctarget, info->level, x, y, info->box->width, info->box->height,
                               glformat, gltype, data);
            }
         } else if (res->target == GL_TEXTURE_3D || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
            if (compressed) {
               glCompressedTexSubImage3D(res->target, info->level, x, y, info->box->z,
                                         info->box->width, info->box->height, info->box->depth,
                                         glformat, comp_size, data);
            } else {
               glTexSubImage3D(res->target, info->level, x, y, info->box->z,
                               info->box->width, info->box->height, info->box->depth,
                               glformat, gltype, data);
            }
         } else if (res->target == GL_TEXTURE_1D) {
            if (vrend_state.use_gles) {
               /* covers both the compressed and the uncompressed case */
               report_gles_missing_func(ctx, "gl[Compressed]TexSubImage1D");
            } else if (compressed) {
               glCompressedTexSubImage1D(res->target, info->level, info->box->x,
                                         info->box->width,
                                         glformat, comp_size, data);
            } else {
               glTexSubImage1D(res->target, info->level, info->box->x, info->box->width,
                               glformat, gltype, data);
            }
         } else {
            if (compressed) {
               glCompressedTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
                                         info->box->width, info->box->height,
                                         glformat, comp_size, data);
            } else {
               glTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
                               info->box->width,
                               res->target == GL_TEXTURE_1D_ARRAY ? info->box->depth : info->box->height,
                               glformat, gltype, data);
            }
         }
         if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
            if (!vrend_state.use_core_profile)
               glPixelTransferf(GL_DEPTH_SCALE, 1.0);
         }
      }

      if (stride && !need_temp) {
         glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
         glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
      }

      glPixelStorei(GL_UNPACK_ALIGNMENT, 4);

      if (need_temp)
         free(data);
   }
   return 0;
}

static uint32_t vrend_get_texture_depth(struct vrend_resource *res, uint32_t level)
{
   uint32_t depth = 1;
   if (res->target == GL_TEXTURE_3D)
      depth = u_minify(res->base.depth0, level);
   else if (res->target == GL_TEXTURE_1D_ARRAY || res->target == GL_TEXTURE_2D_ARRAY ||
            res->target == GL_TEXTURE_CUBE_MAP || res->target == GL_TEXTURE_CUBE_MAP_ARRAY)
      depth = res->base.array_size;

   return depth;
}

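/*
 * Read back via glGetTexImage: fetch the whole mip level into a staging
 * buffer and then copy the requested box to the guest iovecs, offsetting
 * into the staging data when a specific z slice was requested. Used when
 * the resource cannot be read with glReadPixels; on GLES the required
 * entry points may be missing entirely.
 */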
static int vrend_transfer_send_getteximage(struct vrend_context *ctx,
                                           struct vrend_resource *res,
                                           struct iovec *iov, int num_iovs,
                                           const struct vrend_transfer_info *info)
{
   GLenum format, type;
   uint32_t tex_size;
   char *data;
   int elsize = util_format_get_blocksize(res->base.format);
   int compressed = util_format_is_compressed(res->base.format);
   GLenum target;
   uint32_t send_offset = 0;
   format = tex_conv_table[res->base.format].glformat;
   type = tex_conv_table[res->base.format].gltype;

   if (compressed)
      format = tex_conv_table[res->base.format].internalformat;

   tex_size = util_format_get_nblocks(res->base.format, u_minify(res->base.width0, info->level), u_minify(res->base.height0, info->level)) *
              util_format_get_blocksize(res->base.format) * vrend_get_texture_depth(res, info->level);

   if (info->box->z && res->target != GL_TEXTURE_CUBE_MAP) {
      send_offset = util_format_get_nblocks(res->base.format, u_minify(res->base.width0, info->level), u_minify(res->base.height0, info->level)) * util_format_get_blocksize(res->base.format) * info->box->z;
   }

   data = malloc(tex_size);
   if (!data)
      return ENOMEM;

   switch (elsize) {
   case 1:
      glPixelStorei(GL_PACK_ALIGNMENT, 1);
      break;
   case 2:
      glPixelStorei(GL_PACK_ALIGNMENT, 2);
      break;
   case 8:
      glPixelStorei(GL_PACK_ALIGNMENT, 8);
      break;
   case 4:
   default:
      glPixelStorei(GL_PACK_ALIGNMENT, 4);
      break;
   }

   glBindTexture(res->target, res->id);
   if (res->target == GL_TEXTURE_CUBE_MAP) {
      target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
   } else
      target = res->target;

   if (compressed) {
      if (has_feature(feat_arb_robustness)) {
         glGetnCompressedTexImageARB(target, info->level, tex_size, data);
      } else if (vrend_state.use_gles) {
         report_gles_missing_func(ctx, "glGetCompressedTexImage");
      } else {
         glGetCompressedTexImage(target, info->level, data);
      }
   } else {
      if (has_feature(feat_arb_robustness)) {
         glGetnTexImageARB(target, info->level, format, type, tex_size, data);
      } else if (vrend_state.use_gles) {
         report_gles_missing_func(ctx, "glGetTexImage");
      } else {
         glGetTexImage(target, info->level, format, type, data);
      }
   }

   glPixelStorei(GL_PACK_ALIGNMENT, 4);

   write_transfer_data(&res->base, iov, num_iovs, data + send_offset,
                       info->stride, info->box, info->level, info->offset,
                       false);
   free(data);
   return 0;
}

vrend_transfer_send_readpixels(struct vrend_context * ctx,struct vrend_resource * res,struct iovec * iov,int num_iovs,const struct vrend_transfer_info * info)6315 static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
6316 struct vrend_resource *res,
6317 struct iovec *iov, int num_iovs,
6318 const struct vrend_transfer_info *info)
6319 {
6320 char *myptr = (char*)iov[0].iov_base + info->offset;
6321 int need_temp = 0;
6322 GLuint fb_id;
6323 char *data;
6324 bool actually_invert, separate_invert = false;
6325 GLenum format, type;
6326 GLint y1;
6327 uint32_t send_size = 0;
6328 uint32_t h = u_minify(res->base.height0, info->level);
6329 int elsize = util_format_get_blocksize(res->base.format);
6330 float depth_scale;
6331 int row_stride = info->stride / elsize;
6332
6333 vrend_use_program(ctx, 0);
6334
6335 format = tex_conv_table[res->base.format].glformat;
6336 type = tex_conv_table[res->base.format].gltype;
6337 /* if we are asked to invert and reading from a front then don't */
6338
6339 actually_invert = res->y_0_top;
6340
6341 if (actually_invert && !has_feature(feat_mesa_invert))
6342 separate_invert = true;
6343
6344 if (num_iovs > 1 || separate_invert)
6345 need_temp = 1;
6346
6347 if (need_temp) {
6348 send_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * info->box->depth * util_format_get_blocksize(res->base.format);
6349 data = malloc(send_size);
6350 if (!data) {
6351 fprintf(stderr,"malloc failed %d\n", send_size);
6352 return ENOMEM;
6353 }
6354 } else {
6355 send_size = iov[0].iov_len - info->offset;
6356 data = myptr;
6357 if (!row_stride)
6358 row_stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level));
6359 }
6360
6361 if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level ||
6362 (int)res->readback_fb_z != info->box->z) {
6363
6364 if (res->readback_fb_id)
6365 glDeleteFramebuffers(1, &res->readback_fb_id);
6366
6367 glGenFramebuffers(1, &fb_id);
6368 glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_id);
6369
6370 vrend_fb_bind_texture(res, 0, info->level, info->box->z);
6371
6372 res->readback_fb_id = fb_id;
6373 res->readback_fb_level = info->level;
6374 res->readback_fb_z = info->box->z;
6375 } else
6376 glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, res->readback_fb_id);
6377 if (actually_invert)
6378 y1 = h - info->box->y - info->box->height;
6379 else
6380 y1 = info->box->y;
6381
6382 if (has_feature(feat_mesa_invert) && actually_invert)
6383 glPixelStorei(GL_PACK_INVERT_MESA, 1);
6384 if (!vrend_format_is_ds(res->base.format))
6385 glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
6386 if (!need_temp && row_stride)
6387 glPixelStorei(GL_PACK_ROW_LENGTH, row_stride);
6388
6389 switch (elsize) {
6390 case 1:
6391 glPixelStorei(GL_PACK_ALIGNMENT, 1);
6392 break;
6393 case 2:
6394 glPixelStorei(GL_PACK_ALIGNMENT, 2);
6395 break;
6396 case 4:
6397 default:
6398 glPixelStorei(GL_PACK_ALIGNMENT, 4);
6399 break;
6400 case 8:
6401 glPixelStorei(GL_PACK_ALIGNMENT, 8);
6402 break;
6403 }
6404
6405 if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
6406 /* we get values from the guest as 24-bit scaled integers
6407 but we give them to the host GL and it interprets them
6408 as 32-bit scaled integers, so we need to scale them here */
6409 depth_scale = 1.0 / 256.0;
6410 if (!vrend_state.use_core_profile) {
6411 glPixelTransferf(GL_DEPTH_SCALE, depth_scale);
6412 }
6413 }
6414
6415 /* Warn if the driver doesn't agree about the read format and type.
6416 On desktop GL we can use basically any format and type to glReadPixels,
6417 so we picked the format and type that matches the native format.
6418
6419 But on GLES we are limited to a very few set, luckily most GLES
6420 implementations should return type and format that match the native
6421 formats, and can be used for glReadPixels acording to the GLES spec.
6422
6423 But we have found that at least Mesa returned the wrong formats, again
6424 luckily we are able to change Mesa. But just in case there are more bad
6425 drivers out there, or we mess up the format somewhere, we warn here. */
6426 if (vrend_state.use_gles) {
6427 GLint imp;
6428 if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_INT &&
6429 type != GL_INT && type != GL_FLOAT) {
6430 glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &imp);
6431 if (imp != (GLint)type) {
6432 fprintf(stderr, "GL_IMPLEMENTATION_COLOR_READ_TYPE is not expected native type 0x%x != imp 0x%x\n", type, imp);
6433 }
6434 }
6435 if (format != GL_RGBA && format != GL_RGBA_INTEGER) {
6436 glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &imp);
6437 if (imp != (GLint)format) {
6438 fprintf(stderr, "GL_IMPLEMENTATION_COLOR_READ_FORMAT is not expected native format 0x%x != imp 0x%x\n", format, imp);
6439 }
6440 }
6441 }
6442
6443 if (has_feature(feat_arb_robustness))
6444 glReadnPixelsARB(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
6445 else if (has_feature(feat_gles_khr_robustness))
6446 glReadnPixelsKHR(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
6447 else
6448 glReadPixels(info->box->x, y1, info->box->width, info->box->height, format, type, data);
6449
6450 if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
6451 if (!vrend_state.use_core_profile)
6452 glPixelTransferf(GL_DEPTH_SCALE, 1.0);
6453 else
6454 vrend_scale_depth(data, send_size, depth_scale);
6455 }
6456 if (has_feature(feat_mesa_invert) && actually_invert)
6457 glPixelStorei(GL_PACK_INVERT_MESA, 0);
6458 if (!need_temp && row_stride)
6459 glPixelStorei(GL_PACK_ROW_LENGTH, 0);
6460 glPixelStorei(GL_PACK_ALIGNMENT, 4);
6461 if (need_temp) {
6462 write_transfer_data(&res->base, iov, num_iovs, data,
6463 info->stride, info->box, info->level, info->offset,
6464 separate_invert);
6465 free(data);
6466 }
6467 return 0;
6468 }
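
/* A worked example of the Z24X8 rescale above (illustrative numbers):
 * the host GL returns depth scaled to the full 32-bit range, so a guest
 * 24-bit value of 0xffffff reads back as 0xffffff00. Applying
 * GL_DEPTH_SCALE = 1.0/256.0 (i.e. 2^24 / 2^32) during the pack stage
 * shifts it back down: 0xffffff00 / 256 = 0xffffff. On core profile,
 * where glPixelTransferf is unavailable, vrend_scale_depth() applies
 * the same factor on the CPU after glReadPixels. */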
6469
6470 static int vrend_transfer_send_readonly(UNUSED struct vrend_context *ctx,
6471 struct vrend_resource *res,
6472 struct iovec *iov, int num_iovs,
6473 UNUSED const struct vrend_transfer_info *info)
6474 {
6475 bool same_iov = true;
6476 uint i;
6477
6478 if (res->num_iovs == (uint32_t)num_iovs) {
6479 for (i = 0; i < res->num_iovs; i++) {
6480 if (res->iov[i].iov_len != iov[i].iov_len ||
6481 res->iov[i].iov_base != iov[i].iov_base) {
6482 same_iov = false;
6483 }
6484 }
6485 } else {
6486 same_iov = false;
6487 }
6488
6489    /*
6490     * When we detect that we are reading back into the same iovs that are
6491     * attached to the resource, and we know that the resource cannot be
6492     * rendered to (this function is only called in that case), there is
6493     * nothing more to do.
6494     */
6495 if (same_iov) {
6496 return 0;
6497 }
6498
6499    /* Fall back to glGetTexImage; this will probably fail on GLES. */
6500 return -1;
6501 }
6502
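/* Readback dispatch, as implemented below: resources with no GL backing
 * (target == 0 with res->ptr set) are copied straight out of res->ptr;
 * buffer objects are mapped with glMapBufferRange() and written to the
 * iovec; textures go through glReadPixels when the format is renderable
 * or depth/stencil, and otherwise fall back to glGetTexImage. */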
6503 static int vrend_renderer_transfer_send_iov(struct vrend_context *ctx,
6504 struct vrend_resource *res,
6505 struct iovec *iov, int num_iovs,
6506 const struct vrend_transfer_info *info)
6507 {
6508 if (res->target == 0 && res->ptr) {
6509 uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
6510 vrend_write_to_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, send_size);
6511 return 0;
6512 }
6513
6514 if (res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
6515 res->target == GL_ARRAY_BUFFER_ARB ||
6516 res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
6517 res->target == GL_TEXTURE_BUFFER ||
6518 res->target == GL_UNIFORM_BUFFER) {
6519 uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
6520 void *data;
6521
6522 glBindBufferARB(res->target, res->id);
6523 data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_READ_BIT);
6524 if (!data)
6525          fprintf(stderr, "unable to map buffer %d for reading\n", res->target);
6526 else
6527 vrend_write_to_iovec(iov, num_iovs, info->offset, data, send_size);
6528 glUnmapBuffer(res->target);
6529 } else {
6530 int ret = -1;
6531 bool can_readpixels = true;
6532
6533 can_readpixels = vrend_format_can_render(res->base.format) || vrend_format_is_ds(res->base.format);
6534
6535 if (can_readpixels) {
6536 ret = vrend_transfer_send_readpixels(ctx, res, iov, num_iovs, info);
6537 } else {
6538 ret = vrend_transfer_send_readonly(ctx, res, iov, num_iovs, info);
6539 }
6540
6541 /* Can hit this on a non-error path as well. */
6542 if (ret != 0) {
6543 ret = vrend_transfer_send_getteximage(ctx, res, iov, num_iovs, info);
6544 }
6545 return ret;
6546 }
6547 return 0;
6548 }
6549
6550 int vrend_renderer_transfer_iov(const struct vrend_transfer_info *info,
6551 int transfer_mode)
6552 {
6553 struct vrend_resource *res;
6554 struct vrend_context *ctx;
6555 struct iovec *iov;
6556 int num_iovs;
6557
6558 if (!info->box)
6559 return EINVAL;
6560
6561 ctx = vrend_lookup_renderer_ctx(info->ctx_id);
6562 if (!ctx)
6563 return EINVAL;
6564
6565 if (info->ctx_id == 0)
6566 res = vrend_resource_lookup(info->handle, 0);
6567 else
6568 res = vrend_renderer_ctx_res_lookup(ctx, info->handle);
6569
6570 if (!res) {
6571 if (info->ctx_id)
6572 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
6573 return EINVAL;
6574 }
6575
6576 iov = info->iovec;
6577 num_iovs = info->iovec_cnt;
6578
6579 if (res->iov && (!iov || num_iovs == 0)) {
6580 iov = res->iov;
6581 num_iovs = res->num_iovs;
6582 }
6583
6584 if (!iov) {
6585 if (info->ctx_id)
6586 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
6587 return EINVAL;
6588 }
6589
6590 if (!check_transfer_bounds(res, info))
6591 return EINVAL;
6592
6593 if (!check_iov_bounds(res, info, iov, num_iovs))
6594 return EINVAL;
6595
6596 vrend_hw_switch_context(vrend_lookup_renderer_ctx(0), true);
6597
6598 if (transfer_mode == VREND_TRANSFER_WRITE)
6599 return vrend_renderer_transfer_write_iov(ctx, res, iov, num_iovs,
6600 info);
6601 else
6602 return vrend_renderer_transfer_send_iov(ctx, res, iov, num_iovs,
6603 info);
6604 }
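
/* Usage sketch for the entry point above (hypothetical caller; the
 * field values are invented for illustration):
 *
 *    struct vrend_transfer_info info = {0};
 *    info.handle = res_handle;
 *    info.ctx_id = ctx_id;
 *    info.box = &box;
 *    info.iovec = iov;
 *    info.iovec_cnt = 1;
 *    if (vrend_renderer_transfer_iov(&info, VREND_TRANSFER_WRITE))
 *       handle_error();   // non-zero (e.g. EINVAL) signals failure
 */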
6605
6606 int vrend_transfer_inline_write(struct vrend_context *ctx,
6607 struct vrend_transfer_info *info,
6608 UNUSED unsigned usage)
6609 {
6610 struct vrend_resource *res;
6611
6612 res = vrend_renderer_ctx_res_lookup(ctx, info->handle);
6613 if (!res) {
6614 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
6615 return EINVAL;
6616 }
6617
6618 if (!check_transfer_bounds(res, info)) {
6619 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, info->handle);
6620 return EINVAL;
6621 }
6622
6623 if (!check_iov_bounds(res, info, info->iovec, info->iovec_cnt)) {
6624 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, info->handle);
6625 return EINVAL;
6626 }
6627
6628 return vrend_renderer_transfer_write_iov(ctx, res, info->iovec, info->iovec_cnt, info);
6629
6630 }
6631
6632 void vrend_set_stencil_ref(struct vrend_context *ctx,
6633 struct pipe_stencil_ref *ref)
6634 {
6635 if (ctx->sub->stencil_refs[0] != ref->ref_value[0] ||
6636 ctx->sub->stencil_refs[1] != ref->ref_value[1]) {
6637 ctx->sub->stencil_refs[0] = ref->ref_value[0];
6638 ctx->sub->stencil_refs[1] = ref->ref_value[1];
6639 ctx->sub->stencil_state_dirty = true;
6640 }
6641 }
6642
6643 void vrend_set_blend_color(struct vrend_context *ctx,
6644 struct pipe_blend_color *color)
6645 {
6646 ctx->sub->blend_color = *color;
6647 glBlendColor(color->color[0], color->color[1], color->color[2],
6648 color->color[3]);
6649 }
6650
6651 void vrend_set_scissor_state(struct vrend_context *ctx,
6652 uint32_t start_slot,
6653 uint32_t num_scissor,
6654 struct pipe_scissor_state *ss)
6655 {
6656 uint i, idx;
6657
6658 if (start_slot > PIPE_MAX_VIEWPORTS ||
6659 num_scissor > (PIPE_MAX_VIEWPORTS - start_slot)) {
6660 vrend_report_buffer_error(ctx, 0);
6661 return;
6662 }
6663
6664 for (i = 0; i < num_scissor; i++) {
6665 idx = start_slot + i;
6666 ctx->sub->ss[idx] = ss[i];
6667 ctx->sub->scissor_state_dirty |= (1 << idx);
6668 }
6669 }
6670
6671 void vrend_set_polygon_stipple(struct vrend_context *ctx,
6672 struct pipe_poly_stipple *ps)
6673 {
6674 if (vrend_state.use_core_profile) {
6675       static const unsigned bit31 = 1u << 31;
6676 GLubyte *stip = calloc(1, 1024);
6677 int i, j;
6678
6679 if (!ctx->pstip_inited)
6680 vrend_init_pstipple_texture(ctx);
6681
6682 if (!stip)
6683 return;
6684
6685 for (i = 0; i < 32; i++) {
6686 for (j = 0; j < 32; j++) {
6687 if (ps->stipple[i] & (bit31 >> j))
6688 stip[i * 32 + j] = 0;
6689 else
6690 stip[i * 32 + j] = 255;
6691 }
6692 }
6693
6694 glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
6695 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 32, 32,
6696 GL_RED, GL_UNSIGNED_BYTE, stip);
6697
6698 free(stip);
6699 return;
6700 }
6701 glPolygonStipple((const GLubyte *)ps->stipple);
6702 }
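
/* Expansion example for the core-profile path above (illustrative): for
 * a stipple row of 0xaaaaaaaa, (bit31 >> j) tests bits from the MSB
 * down, so even columns are set and become texel value 0 (fragment
 * masked) while odd columns become 255 (drawn). The resulting 32x32
 * GL_RED texture stands in for the fixed-function glPolygonStipple
 * pattern. */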
6703
6704 void vrend_set_clip_state(struct vrend_context *ctx, struct pipe_clip_state *ucp)
6705 {
6706 if (vrend_state.use_core_profile) {
6707 ctx->sub->ucp_state = *ucp;
6708 } else {
6709 int i, j;
6710 GLdouble val[4];
6711
6712 for (i = 0; i < 8; i++) {
6713 for (j = 0; j < 4; j++)
6714 val[j] = ucp->ucp[i][j];
6715 glClipPlane(GL_CLIP_PLANE0 + i, val);
6716 }
6717 }
6718 }
6719
6720 void vrend_set_sample_mask(UNUSED struct vrend_context *ctx, unsigned sample_mask)
6721 {
6722 if (has_feature(feat_sample_mask))
6723 glSampleMaski(0, sample_mask);
6724 }
6725
6726 void vrend_set_min_samples(struct vrend_context *ctx, unsigned min_samples)
6727 {
6728 float min_sample_shading = (float)min_samples;
6729 if (ctx->sub->nr_cbufs > 0 && ctx->sub->surf[0]) {
6730 assert(ctx->sub->surf[0]->texture);
6731 min_sample_shading /= MAX2(1, ctx->sub->surf[0]->texture->base.nr_samples);
6732 }
6733
6734 if (has_feature(feat_sample_shading))
6735 glMinSampleShading(min_sample_shading);
6736 }
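
/* Example (illustrative numbers): min_samples = 2 on a 4x MSAA color
 * buffer gives min_sample_shading = 2.0 / 4 = 0.5, i.e.
 * glMinSampleShading(0.5) asks the GL to shade at least half of the
 * samples independently. */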
6737
6738 void vrend_set_tess_state(UNUSED struct vrend_context *ctx, const float tess_factors[6])
6739 {
6740 if (has_feature(feat_tessellation)) {
6741 glPatchParameterfv(GL_PATCH_DEFAULT_OUTER_LEVEL, tess_factors);
6742 glPatchParameterfv(GL_PATCH_DEFAULT_INNER_LEVEL, &tess_factors[4]);
6743 }
6744 }
6745
6746 static void vrend_hw_emit_streamout_targets(UNUSED struct vrend_context *ctx, struct vrend_streamout_object *so_obj)
6747 {
6748 uint i;
6749
6750 for (i = 0; i < so_obj->num_targets; i++) {
6751 if (so_obj->so_targets[i]->buffer_offset || so_obj->so_targets[i]->buffer_size < so_obj->so_targets[i]->buffer->base.width0)
6752 glBindBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER, i, so_obj->so_targets[i]->buffer->id, so_obj->so_targets[i]->buffer_offset, so_obj->so_targets[i]->buffer_size);
6753 else
6754 glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, i, so_obj->so_targets[i]->buffer->id);
6755 }
6756 }
6757
6758 void vrend_set_streamout_targets(struct vrend_context *ctx,
6759 UNUSED uint32_t append_bitmask,
6760 uint32_t num_targets,
6761 uint32_t *handles)
6762 {
6763 struct vrend_so_target *target;
6764 uint i;
6765
6766 if (!has_feature(feat_transform_feedback))
6767 return;
6768
6769 if (num_targets) {
6770 bool found = false;
6771 struct vrend_streamout_object *obj;
6772 LIST_FOR_EACH_ENTRY(obj, &ctx->sub->streamout_list, head) {
6773 if (obj->num_targets == num_targets) {
6774 if (!memcmp(handles, obj->handles, num_targets * 4)) {
6775 found = true;
6776 break;
6777 }
6778 }
6779 }
6780 if (found) {
6781 ctx->sub->current_so = obj;
6782 glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
6783 return;
6784 }
6785
6786 obj = CALLOC_STRUCT(vrend_streamout_object);
6787 if (has_feature(feat_transform_feedback2)) {
6788 glGenTransformFeedbacks(1, &obj->id);
6789 glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
6790 }
6791 obj->num_targets = num_targets;
6792 for (i = 0; i < num_targets; i++) {
6793 obj->handles[i] = handles[i];
6794 target = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_STREAMOUT_TARGET);
6795 if (!target) {
6796 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handles[i]);
6797 free(obj);
6798 return;
6799 }
6800 vrend_so_target_reference(&obj->so_targets[i], target);
6801 }
6802 vrend_hw_emit_streamout_targets(ctx, obj);
6803 list_addtail(&obj->head, &ctx->sub->streamout_list);
6804 ctx->sub->current_so = obj;
6805 obj->xfb_state = XFB_STATE_STARTED_NEED_BEGIN;
6806 } else {
6807 if (has_feature(feat_transform_feedback2))
6808 glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
6809 ctx->sub->current_so = NULL;
6810 }
6811 }
6812
6813 static void vrend_resource_buffer_copy(UNUSED struct vrend_context *ctx,
6814 struct vrend_resource *src_res,
6815 struct vrend_resource *dst_res,
6816 uint32_t dstx, uint32_t srcx,
6817 uint32_t width)
6818 {
6819 glBindBuffer(GL_COPY_READ_BUFFER, src_res->id);
6820 glBindBuffer(GL_COPY_WRITE_BUFFER, dst_res->id);
6821
6822 glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, srcx, dstx, width);
6823 glBindBuffer(GL_COPY_READ_BUFFER, 0);
6824 glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
6825 }
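
/* This is plain ARB_copy_buffer usage; srcx, dstx and width are byte
 * offsets/sizes. E.g. copying 256 bytes from offset 0 of src to offset
 * 64 of dst issues glCopyBufferSubData(GL_COPY_READ_BUFFER,
 * GL_COPY_WRITE_BUFFER, 0, 64, 256). */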
6826
6827 static void vrend_resource_copy_fallback(struct vrend_resource *src_res,
6828 struct vrend_resource *dst_res,
6829 uint32_t dst_level,
6830 uint32_t dstx, uint32_t dsty,
6831 uint32_t dstz, uint32_t src_level,
6832 const struct pipe_box *src_box)
6833 {
6834 char *tptr;
6835 uint32_t total_size, src_stride, dst_stride;
6836 GLenum glformat, gltype;
6837 int elsize = util_format_get_blocksize(dst_res->base.format);
6838 int compressed = util_format_is_compressed(dst_res->base.format);
6839 int cube_slice = 1;
6840 uint32_t slice_size, slice_offset;
6841 int i;
6842 struct pipe_box box;
6843
6844 if (src_res->target == GL_TEXTURE_CUBE_MAP)
6845 cube_slice = 6;
6846
6847 if (src_res->base.format != dst_res->base.format) {
6848 fprintf(stderr, "copy fallback failed due to mismatched formats %d %d\n", src_res->base.format, dst_res->base.format);
6849 return;
6850 }
6851
6852 box = *src_box;
6853 box.depth = vrend_get_texture_depth(src_res, src_level);
6854 dst_stride = util_format_get_stride(dst_res->base.format, dst_res->base.width0);
6855
6856    /* this is ugly - we need to do a full GetTexImage */
6857 slice_size = util_format_get_nblocks(src_res->base.format, u_minify(src_res->base.width0, src_level), u_minify(src_res->base.height0, src_level)) *
6858 util_format_get_blocksize(src_res->base.format);
6859 total_size = slice_size * vrend_get_texture_depth(src_res, src_level);
6860
6861 tptr = malloc(total_size);
6862 if (!tptr)
6863 return;
6864
6865 glformat = tex_conv_table[src_res->base.format].glformat;
6866 gltype = tex_conv_table[src_res->base.format].gltype;
6867
6868 if (compressed)
6869 glformat = tex_conv_table[src_res->base.format].internalformat;
6870
6871    /* If we are on GLES we need to rely on the texture's backing
6872     * iovec to have the data we need; otherwise we can use glGetTexImage
6873     */
6874 if (vrend_state.use_gles) {
6875 uint64_t src_offset = 0;
6876 uint64_t dst_offset = 0;
6877 if (src_level < VR_MAX_TEXTURE_2D_LEVELS) {
6878 src_offset = src_res->mipmap_offsets[src_level];
6879 dst_offset = dst_res->mipmap_offsets[src_level];
6880 }
6881
6882 src_stride = util_format_get_nblocksx(src_res->base.format,
6883 u_minify(src_res->base.width0, src_level)) * elsize;
6884 read_transfer_data(&src_res->base, src_res->iov, src_res->num_iovs, tptr,
6885 src_stride, &box, src_level, src_offset, false);
6886       /* When on GLES, sync the iov that backs the dst resource, because
6887        * we might need it in a chained copy A->B, B->C */
6888 write_transfer_data(&dst_res->base, dst_res->iov, dst_res->num_iovs, tptr,
6889 dst_stride, &box, src_level, dst_offset, false);
6890 /* we get values from the guest as 24-bit scaled integers
6891 but we give them to the host GL and it interprets them
6892 as 32-bit scaled integers, so we need to scale them here */
6893 if (dst_res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
6894 float depth_scale = 256.0;
6895 vrend_scale_depth(tptr, total_size, depth_scale);
6896 }
6897 } else {
6898 uint32_t read_chunk_size;
6899 switch (elsize) {
6900 case 1:
6901 case 3:
6902 glPixelStorei(GL_PACK_ALIGNMENT, 1);
6903 break;
6904 case 2:
6905 case 6:
6906 glPixelStorei(GL_PACK_ALIGNMENT, 2);
6907 break;
6908 case 4:
6909 default:
6910 glPixelStorei(GL_PACK_ALIGNMENT, 4);
6911 break;
6912 case 8:
6913 glPixelStorei(GL_PACK_ALIGNMENT, 8);
6914 break;
6915 }
6916 glBindTexture(src_res->target, src_res->id);
6917 slice_offset = 0;
6918 read_chunk_size = (src_res->target == GL_TEXTURE_CUBE_MAP) ? slice_size : total_size;
6919 for (i = 0; i < cube_slice; i++) {
6920 GLenum ctarget = src_res->target == GL_TEXTURE_CUBE_MAP ?
6921 (GLenum)(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i) : src_res->target;
6922 if (compressed) {
6923 if (has_feature(feat_arb_robustness))
6924 glGetnCompressedTexImageARB(ctarget, src_level, read_chunk_size, tptr + slice_offset);
6925 else
6926 glGetCompressedTexImage(ctarget, src_level, tptr + slice_offset);
6927 } else {
6928 if (has_feature(feat_arb_robustness))
6929 glGetnTexImageARB(ctarget, src_level, glformat, gltype, read_chunk_size, tptr + slice_offset);
6930 else
6931 glGetTexImage(ctarget, src_level, glformat, gltype, tptr + slice_offset);
6932 }
6933 slice_offset += slice_size;
6934 }
6935 }
6936
6937 glPixelStorei(GL_PACK_ALIGNMENT, 4);
6938 switch (elsize) {
6939 case 1:
6940 case 3:
6941 glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
6942 break;
6943 case 2:
6944 case 6:
6945 glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
6946 break;
6947 case 4:
6948 default:
6949 glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
6950 break;
6951 case 8:
6952 glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
6953 break;
6954 }
6955
6956 glBindTexture(dst_res->target, dst_res->id);
6957 slice_offset = src_box->z * slice_size;
6958 cube_slice = (src_res->target == GL_TEXTURE_CUBE_MAP) ? src_box->z + src_box->depth : cube_slice;
6959 i = (src_res->target == GL_TEXTURE_CUBE_MAP) ? src_box->z : 0;
6960 for (; i < cube_slice; i++) {
6961 GLenum ctarget = dst_res->target == GL_TEXTURE_CUBE_MAP ?
6962 (GLenum)(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i) : dst_res->target;
6963 if (compressed) {
6964 if (ctarget == GL_TEXTURE_1D) {
6965 glCompressedTexSubImage1D(ctarget, dst_level, dstx,
6966 src_box->width,
6967 glformat, slice_size, tptr + slice_offset);
6968 } else {
6969 glCompressedTexSubImage2D(ctarget, dst_level, dstx, dsty,
6970 src_box->width, src_box->height,
6971 glformat, slice_size, tptr + slice_offset);
6972 }
6973 } else {
6974 if (ctarget == GL_TEXTURE_1D) {
6975 glTexSubImage1D(ctarget, dst_level, dstx, src_box->width, glformat, gltype, tptr + slice_offset);
6976 } else if (ctarget == GL_TEXTURE_3D ||
6977 ctarget == GL_TEXTURE_2D_ARRAY ||
6978 ctarget == GL_TEXTURE_CUBE_MAP_ARRAY) {
6979 glTexSubImage3D(ctarget, dst_level, dstx, dsty, dstz, src_box->width, src_box->height, src_box->depth, glformat, gltype, tptr + slice_offset);
6980 } else {
6981 glTexSubImage2D(ctarget, dst_level, dstx, dsty, src_box->width, src_box->height, glformat, gltype, tptr + slice_offset);
6982 }
6983 }
6984 slice_offset += slice_size;
6985 }
6986
6987 glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
6988 free(tptr);
6989 }
6990
6991
6992 static inline void
6993 vrend_copy_sub_image(struct vrend_resource* src_res, struct vrend_resource * dst_res,
6994 uint32_t src_level, const struct pipe_box *src_box,
6995 uint32_t dst_level, uint32_t dstx, uint32_t dsty, uint32_t dstz)
6996 {
6997 glCopyImageSubData(src_res->id,
6998 tgsitargettogltarget(src_res->base.target, src_res->base.nr_samples),
6999 src_level, src_box->x, src_box->y, src_box->z,
7000 dst_res->id,
7001 tgsitargettogltarget(dst_res->base.target, dst_res->base.nr_samples),
7002 dst_level, dstx, dsty, dstz,
7003                       src_box->width, src_box->height, src_box->depth);
7004 }
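
/* glCopyImageSubData requires compatible formats (same texel block
 * size) and matching sample counts; the callers check this via
 * format_is_copy_compatible() and an nr_samples comparison before
 * dispatching here. */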
7005
7006
7007 void vrend_renderer_resource_copy_region(struct vrend_context *ctx,
7008 uint32_t dst_handle, uint32_t dst_level,
7009 uint32_t dstx, uint32_t dsty, uint32_t dstz,
7010 uint32_t src_handle, uint32_t src_level,
7011 const struct pipe_box *src_box)
7012 {
7013 struct vrend_resource *src_res, *dst_res;
7014 GLbitfield glmask = 0;
7015 GLint sy1, sy2, dy1, dy2;
7016
7017 if (ctx->in_error)
7018 return;
7019
7020 src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
7021 dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
7022
7023 if (!src_res) {
7024 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
7025 return;
7026 }
7027 if (!dst_res) {
7028 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
7029 return;
7030 }
7031
7032 if (src_res->base.target == PIPE_BUFFER && dst_res->base.target == PIPE_BUFFER) {
7033 /* do a buffer copy */
7034 vrend_resource_buffer_copy(ctx, src_res, dst_res, dstx,
7035 src_box->x, src_box->width);
7036 return;
7037 }
7038
7039 if (has_feature(feat_copy_image) &&
7040        format_is_copy_compatible(src_res->base.format, dst_res->base.format, true) &&
7041 src_res->base.nr_samples == dst_res->base.nr_samples) {
7042 vrend_copy_sub_image(src_res, dst_res, src_level, src_box,
7043 dst_level, dstx, dsty, dstz);
7044 return;
7045 }
7046
7047 if (!vrend_format_can_render(src_res->base.format) ||
7048 !vrend_format_can_render(dst_res->base.format)) {
7049 vrend_resource_copy_fallback(src_res, dst_res, dst_level, dstx,
7050 dsty, dstz, src_level, src_box);
7051 return;
7052 }
7053
7054 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
7055 /* clean out fb ids */
7056 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
7057 GL_TEXTURE_2D, 0, 0);
7058 vrend_fb_bind_texture(src_res, 0, src_level, src_box->z);
7059
7060 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
7061 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
7062 GL_TEXTURE_2D, 0, 0);
7063 vrend_fb_bind_texture(dst_res, 0, dst_level, dstz);
7064 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
7065
7066 glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
7067
7068 glmask = GL_COLOR_BUFFER_BIT;
7069 glDisable(GL_SCISSOR_TEST);
7070
7071 if (!src_res->y_0_top) {
7072 sy1 = src_box->y;
7073 sy2 = src_box->y + src_box->height;
7074 } else {
7075 sy1 = src_res->base.height0 - src_box->y - src_box->height;
7076 sy2 = src_res->base.height0 - src_box->y;
7077 }
7078
7079 if (!dst_res->y_0_top) {
7080 dy1 = dsty;
7081 dy2 = dsty + src_box->height;
7082 } else {
7083 dy1 = dst_res->base.height0 - dsty - src_box->height;
7084 dy2 = dst_res->base.height0 - dsty;
7085 }
7086
7087 glBlitFramebuffer(src_box->x, sy1,
7088 src_box->x + src_box->width,
7089 sy2,
7090 dstx, dy1,
7091 dstx + src_box->width,
7092 dy2,
7093 glmask, GL_NEAREST);
7094
7095 }
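
/* Y-flip example for the coordinate math above (illustrative numbers):
 * for a y_0_top source with height0 = 600 and a box with y = 100,
 * height = 200, the read window becomes sy1 = 600 - 100 - 200 = 300 and
 * sy2 = 600 - 100 = 500, so the blit reads the same rows the guest
 * addressed with its top-left origin. */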
7096
7097 static void vrend_renderer_blit_int(struct vrend_context *ctx,
7098 struct vrend_resource *src_res,
7099 struct vrend_resource *dst_res,
7100 const struct pipe_blit_info *info)
7101 {
7102 GLbitfield glmask = 0;
7103 int src_y1, src_y2, dst_y1, dst_y2;
7104 GLenum filter;
7105 int n_layers = 1, i;
7106 bool use_gl = false;
7107 bool make_intermediate_copy = false;
7108 GLuint intermediate_fbo = 0;
7109    struct vrend_resource *intermediate_copy = NULL;
7110
7111 filter = convert_mag_filter(info->filter);
7112
7113    /* if we can't make FBOs, use the fallback path */
7114 if (!vrend_format_can_render(src_res->base.format) &&
7115 !vrend_format_is_ds(src_res->base.format))
7116 use_gl = true;
7117 if (!vrend_format_can_render(dst_res->base.format) &&
7118 !vrend_format_is_ds(dst_res->base.format))
7119 use_gl = true;
7120
7121 if (util_format_is_srgb(src_res->base.format) &&
7122 !util_format_is_srgb(dst_res->base.format))
7123 use_gl = true;
7124
7125 /* different depth formats */
7126 if (vrend_format_is_ds(src_res->base.format) &&
7127 vrend_format_is_ds(dst_res->base.format)) {
7128 if (src_res->base.format != dst_res->base.format) {
7129 if (!(src_res->base.format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
7130 (dst_res->base.format == PIPE_FORMAT_Z24X8_UNORM))) {
7131 use_gl = true;
7132 }
7133 }
7134 }
7135    /* glBlitFramebuffer can only blit depth/stencil with NEAREST
7136       filtering, which is what we use for mipmaps */
7137 if ((info->mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->filter == PIPE_TEX_FILTER_LINEAR)
7138 use_gl = true;
7139
7140 /* for scaled MS blits we either need extensions or hand roll */
7141 if (info->mask & PIPE_MASK_RGBA &&
7142 src_res->base.nr_samples > 1 &&
7143 src_res->base.nr_samples != dst_res->base.nr_samples &&
7144 (info->src.box.width != info->dst.box.width ||
7145 info->src.box.height != info->dst.box.height)) {
7146 if (has_feature(feat_ms_scaled_blit))
7147 filter = GL_SCALED_RESOLVE_NICEST_EXT;
7148 else
7149 use_gl = true;
7150 }
7151
7152 /* for 3D mipmapped blits - hand roll time */
7153 if (info->src.box.depth != info->dst.box.depth)
7154 use_gl = true;
7155
7156 if (vrend_format_needs_swizzle(info->dst.format) ||
7157 vrend_format_needs_swizzle(info->src.format))
7158 use_gl = true;
7159
7160 if (use_gl) {
7161 vrend_renderer_blit_gl(ctx, src_res, dst_res, info,
7162 has_feature(feat_texture_srgb_decode));
7163 vrend_clicbs->make_current(0, ctx->sub->gl_context);
7164 return;
7165 }
7166
7167 if (info->mask & PIPE_MASK_Z)
7168 glmask |= GL_DEPTH_BUFFER_BIT;
7169 if (info->mask & PIPE_MASK_S)
7170 glmask |= GL_STENCIL_BUFFER_BIT;
7171 if (info->mask & PIPE_MASK_RGBA)
7172 glmask |= GL_COLOR_BUFFER_BIT;
7173
7174 if (!dst_res->y_0_top) {
7175 dst_y1 = info->dst.box.y + info->dst.box.height;
7176 dst_y2 = info->dst.box.y;
7177 } else {
7178 dst_y1 = dst_res->base.height0 - info->dst.box.y - info->dst.box.height;
7179 dst_y2 = dst_res->base.height0 - info->dst.box.y;
7180 }
7181
7182 if (!src_res->y_0_top) {
7183 src_y1 = info->src.box.y + info->src.box.height;
7184 src_y2 = info->src.box.y;
7185 } else {
7186 src_y1 = src_res->base.height0 - info->src.box.y - info->src.box.height;
7187 src_y2 = src_res->base.height0 - info->src.box.y;
7188 }
7189
7190 if (info->scissor_enable) {
7191 glScissor(info->scissor.minx, info->scissor.miny, info->scissor.maxx - info->scissor.minx, info->scissor.maxy - info->scissor.miny);
7192 glEnable(GL_SCISSOR_TEST);
7193 } else
7194 glDisable(GL_SCISSOR_TEST);
7195 ctx->sub->scissor_state_dirty = (1 << 0);
7196
7197    /* On GLES, GL_INVALID_OPERATION is generated when blitting from a
7198     * multi-sample FBO to a non-multi-sample FBO unless the source and
7199     * destination rectangles have identical (X0, Y0) and (X1, Y1) bounds.
7200     *
7201     * Since stencil data can only be written in a fragment shader when
7202     * ARB_shader_stencil_export is available, the GL fallback used above is
7203     * usually not an option. Instead, to work around the blit limitations
7204     * on GLES, first resolve the full frame into a non-multi-sample
7205     * surface and then copy the relevant area to the final target surface.
7206     */
7207 if (vrend_state.use_gles &&
7208 (info->mask & PIPE_MASK_ZS) &&
7209 ((src_res->base.nr_samples > 1) &&
7210 (src_res->base.nr_samples != dst_res->base.nr_samples)) &&
7211 ((info->src.box.x != info->dst.box.x) ||
7212 (src_y1 != dst_y1) ||
7213 (info->src.box.width != info->dst.box.width) ||
7214 (src_y2 != dst_y2))) {
7215
7216 make_intermediate_copy = true;
7217
7218       /* Create a texture with the same layout as the src_res texture,
7219        * but without multisampling */
7220 struct vrend_renderer_resource_create_args args;
7221 memset(&args, 0, sizeof(struct vrend_renderer_resource_create_args));
7222 args.width = src_res->base.width0;
7223 args.height = src_res->base.height0;
7224 args.depth = src_res->base.depth0;
7225 args.format = src_res->base.format;
7226 args.target = src_res->base.target;
7227 args.last_level = src_res->base.last_level;
7228 args.array_size = src_res->base.array_size;
7229 intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
7230 vrend_renderer_resource_copy_args(&args, intermediate_copy);
7231 vrend_renderer_resource_allocate_texture(intermediate_copy, NULL);
7232
7233 glGenFramebuffers(1, &intermediate_fbo);
7234 } else {
7235 /* If no intermediate copy is needed make the variables point to the
7236 * original source to simplify the code below.
7237 */
7238 intermediate_fbo = ctx->sub->blit_fb_ids[0];
7239 intermediate_copy = src_res;
7240 }
7241
7242 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
7243 if (info->mask & PIPE_MASK_RGBA)
7244 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
7245 GL_TEXTURE_2D, 0, 0);
7246 else
7247 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
7248 GL_TEXTURE_2D, 0, 0);
7249 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
7250 if (info->mask & PIPE_MASK_RGBA)
7251 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
7252 GL_TEXTURE_2D, 0, 0);
7253 else if (info->mask & (PIPE_MASK_Z | PIPE_MASK_S))
7254 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
7255 GL_TEXTURE_2D, 0, 0);
7256 if (info->src.box.depth == info->dst.box.depth)
7257 n_layers = info->dst.box.depth;
7258 for (i = 0; i < n_layers; i++) {
7259 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
7260 vrend_fb_bind_texture(src_res, 0, info->src.level, info->src.box.z + i);
7261
7262 if (make_intermediate_copy) {
7263 int level_width = u_minify(src_res->base.width0, info->src.level);
7264          int level_height = u_minify(src_res->base.height0, info->src.level);
7265 glBindFramebuffer(GL_FRAMEBUFFER_EXT, intermediate_fbo);
7266 glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
7267 GL_TEXTURE_2D, 0, 0);
7268 vrend_fb_bind_texture(intermediate_copy, 0, info->src.level, info->src.box.z + i);
7269
7270 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, intermediate_fbo);
7271 glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
7272 glBlitFramebuffer(0, 0, level_width, level_height,
7273 0, 0, level_width, level_height,
7274 glmask, filter);
7275 }
7276
7277 glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
7278 vrend_fb_bind_texture(dst_res, 0, info->dst.level, info->dst.box.z + i);
7279 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
7280
7281 if (!vrend_state.use_gles) {
7282 if (util_format_is_srgb(dst_res->base.format))
7283 glEnable(GL_FRAMEBUFFER_SRGB);
7284 else
7285 glDisable(GL_FRAMEBUFFER_SRGB);
7286 }
7287
7288 glBindFramebuffer(GL_READ_FRAMEBUFFER, intermediate_fbo);
7289
7290 glBlitFramebuffer(info->src.box.x,
7291 src_y1,
7292 info->src.box.x + info->src.box.width,
7293 src_y2,
7294 info->dst.box.x,
7295 dst_y1,
7296 info->dst.box.x + info->dst.box.width,
7297 dst_y2,
7298 glmask, filter);
7299 }
7300
7301 if (make_intermediate_copy) {
7302 vrend_renderer_resource_destroy(intermediate_copy, false);
7303 glDeleteFramebuffers(1, &intermediate_fbo);
7304 }
7305 }
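
/* Design note on the intermediate copy above: GLES only allows a
 * multisample -> single-sample depth/stencil blit when the source and
 * destination rectangles match exactly, so the first blit resolves the
 * whole level into an identically laid out single-sample texture, and
 * the second blit then crops and places the resolved data. */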
7306
7307 void vrend_renderer_blit(struct vrend_context *ctx,
7308 uint32_t dst_handle, uint32_t src_handle,
7309 const struct pipe_blit_info *info)
7310 {
7311 struct vrend_resource *src_res, *dst_res;
7312 src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
7313 dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
7314
7315 if (!src_res) {
7316 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
7317 return;
7318 }
7319 if (!dst_res) {
7320 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
7321 return;
7322 }
7323
7324 if (ctx->in_error)
7325 return;
7326
7327 if (info->render_condition_enable == false)
7328 vrend_pause_render_condition(ctx, true);
7329
7330    /* The Gallium blit function can be called for a general blit that may
7331     * scale, convert the data, and apply some render states, or it can be
7332     * called via glCopyImageSubData. If the src and dst images are equal,
7333     * or the two image formats are the same, Gallium redirects such calls
7334     * to resource_copy_region; in that case, and if no render states etc.
7335     * need to be applied, forward the call to glCopyImageSubData, otherwise
7336     * do a normal blit. */
7337 if (has_feature(feat_copy_image) && !info->render_condition_enable &&
7338 (src_res->base.format != dst_res->base.format) &&
7339        format_is_copy_compatible(info->src.format, info->dst.format, false) &&
7340 !info->scissor_enable && (info->filter == PIPE_TEX_FILTER_NEAREST) &&
7341 !info->alpha_blend && (info->mask == PIPE_MASK_RGBA) &&
7342 (src_res->base.nr_samples == dst_res->base.nr_samples) &&
7343 info->src.box.width == info->dst.box.width &&
7344 info->src.box.height == info->dst.box.height &&
7345 info->src.box.depth == info->dst.box.depth) {
7346 vrend_copy_sub_image(src_res, dst_res, info->src.level, &info->src.box,
7347 info->dst.level, info->dst.box.x, info->dst.box.y,
7348 info->dst.box.z);
7349 } else {
7350 vrend_renderer_blit_int(ctx, src_res, dst_res, info);
7351 }
7352
7353 if (info->render_condition_enable == false)
7354 vrend_pause_render_condition(ctx, false);
7355 }
7356
7357 int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
7358 {
7359 struct vrend_fence *fence;
7360
7361 fence = malloc(sizeof(struct vrend_fence));
7362 if (!fence)
7363 return ENOMEM;
7364
7365 fence->ctx_id = ctx_id;
7366 fence->fence_id = client_fence_id;
7367 fence->syncobj = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
7368 glFlush();
7369
7370 if (fence->syncobj == NULL)
7371 goto fail;
7372
7373 if (vrend_state.sync_thread) {
7374 pipe_mutex_lock(vrend_state.fence_mutex);
7375 list_addtail(&fence->fences, &vrend_state.fence_wait_list);
7376 pipe_condvar_signal(vrend_state.fence_cond);
7377 pipe_mutex_unlock(vrend_state.fence_mutex);
7378 } else
7379 list_addtail(&fence->fences, &vrend_state.fence_list);
7380 return 0;
7381
7382 fail:
7383 fprintf(stderr, "failed to create fence sync object\n");
7384 free(fence);
7385 return ENOMEM;
7386 }
7387
7388 static void free_fence_locked(struct vrend_fence *fence)
7389 {
7390 list_del(&fence->fences);
7391 glDeleteSync(fence->syncobj);
7392 free(fence);
7393 }
7394
7395 static void flush_eventfd(int fd)
7396 {
7397 ssize_t len;
7398 uint64_t value;
7399 do {
7400 len = read(fd, &value, sizeof(value));
7401 } while ((len == -1 && errno == EINTR) || len == sizeof(value));
7402 }
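
/* A read() on an eventfd returns the accumulated 8-byte counter and
 * resets it, so the loop above drains pending wakeups: it retries on
 * EINTR and keeps reading while full 8-byte values arrive, stopping
 * once read() fails for any other reason (e.g. EAGAIN once a
 * non-blocking descriptor is drained). */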
7403
7404 void vrend_renderer_check_fences(void)
7405 {
7406 struct vrend_fence *fence, *stor;
7407 uint32_t latest_id = 0;
7408 GLenum glret;
7409
7410 if (!vrend_state.inited)
7411 return;
7412
7413 if (vrend_state.sync_thread) {
7414 flush_eventfd(vrend_state.eventfd);
7415 pipe_mutex_lock(vrend_state.fence_mutex);
7416 LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
7417 if (fence->fence_id > latest_id)
7418 latest_id = fence->fence_id;
7419 free_fence_locked(fence);
7420 }
7421 pipe_mutex_unlock(vrend_state.fence_mutex);
7422 } else {
7423 vrend_renderer_force_ctx_0();
7424
7425 LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
7426 glret = glClientWaitSync(fence->syncobj, 0, 0);
7427          if (glret == GL_ALREADY_SIGNALED) {
7428 latest_id = fence->fence_id;
7429 free_fence_locked(fence);
7430 }
7431 /* don't bother checking any subsequent ones */
7432 else if (glret == GL_TIMEOUT_EXPIRED) {
7433 break;
7434 }
7435 }
7436 }
7437
7438 if (latest_id == 0)
7439 return;
7440 vrend_clicbs->write_fence(latest_id);
7441 }
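
/* Fences are appended in submission order, so the non-threaded walk
 * above can stop at the first sync object that has not signaled yet:
 * later fences cannot have completed either. Only the newest signaled
 * id needs to be reported through write_fence(). */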
7442
7443 static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
7444 {
7445 GLuint ready;
7446 GLuint passed;
7447 GLuint64 pass64;
7448
7449 glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_AVAILABLE_ARB, &ready);
7450
7451 if (!ready)
7452 return false;
7453
7454 if (use_64) {
7455 glGetQueryObjectui64v(query_id, GL_QUERY_RESULT_ARB, &pass64);
7456 *result = pass64;
7457 } else {
7458 glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_ARB, &passed);
7459 *result = passed;
7460 }
7461 return true;
7462 }
7463
7464 static bool vrend_check_query(struct vrend_query *query)
7465 {
7466 uint64_t result;
7467 struct virgl_host_query_state *state;
7468 bool ret;
7469
7470 ret = vrend_get_one_query_result(query->id, vrend_is_timer_query(query->gltype), &result);
7471 if (ret == false)
7472 return false;
7473
7474 state = (struct virgl_host_query_state *)query->res->ptr;
7475 state->result = result;
7476 state->query_state = VIRGL_QUERY_STATE_DONE;
7477 return true;
7478 }
7479
7480 void vrend_renderer_check_queries(void)
7481 {
7482 struct vrend_query *query, *stor;
7483
7484 if (!vrend_state.inited)
7485 return;
7486
7487 LIST_FOR_EACH_ENTRY_SAFE(query, stor, &vrend_state.waiting_query_list, waiting_queries) {
7488 vrend_hw_switch_context(vrend_lookup_renderer_ctx(query->ctx_id), true);
7489 if (vrend_check_query(query))
7490 list_delinit(&query->waiting_queries);
7491 }
7492 }
7493
7494 bool vrend_hw_switch_context(struct vrend_context *ctx, bool now)
7495 {
7496 if (ctx == vrend_state.current_ctx && ctx->ctx_switch_pending == false)
7497 return true;
7498
7499 if (ctx->ctx_id != 0 && ctx->in_error) {
7500 return false;
7501 }
7502
7503 ctx->ctx_switch_pending = true;
7504 if (now == true) {
7505 vrend_finish_context_switch(ctx);
7506 }
7507 vrend_state.current_ctx = ctx;
7508 return true;
7509 }
7510
7511 static void vrend_finish_context_switch(struct vrend_context *ctx)
7512 {
7513 if (ctx->ctx_switch_pending == false)
7514 return;
7515 ctx->ctx_switch_pending = false;
7516
7517 if (vrend_state.current_hw_ctx == ctx)
7518 return;
7519
7520 vrend_state.current_hw_ctx = ctx;
7521
7522 vrend_clicbs->make_current(0, ctx->sub->gl_context);
7523 }
7524
7525 void
7526 vrend_renderer_object_destroy(struct vrend_context *ctx, uint32_t handle)
7527 {
7528 vrend_object_remove(ctx->sub->object_hash, handle, 0);
7529 }
7530
7531 uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
7532 uint32_t size, uint32_t handle, enum virgl_object_type type)
7533 {
7534 return vrend_object_insert(ctx->sub->object_hash, data, size, handle, type);
7535 }
7536
7537 int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
7538 uint32_t query_type, uint32_t query_index,
7539 uint32_t res_handle, UNUSED uint32_t offset)
7540 {
7541 struct vrend_query *q;
7542 struct vrend_resource *res;
7543 uint32_t ret_handle;
7544 res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
7545 if (!res) {
7546 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
7547 return EINVAL;
7548 }
7549
7550 q = CALLOC_STRUCT(vrend_query);
7551 if (!q)
7552 return ENOMEM;
7553
7554 list_inithead(&q->waiting_queries);
7555 q->type = query_type;
7556 q->index = query_index;
7557 q->ctx_id = ctx->ctx_id;
7558
7559 vrend_resource_reference(&q->res, res);
7560
7561 switch (q->type) {
7562 case PIPE_QUERY_OCCLUSION_COUNTER:
7563 q->gltype = GL_SAMPLES_PASSED_ARB;
7564 break;
7565 case PIPE_QUERY_OCCLUSION_PREDICATE:
7566 q->gltype = GL_ANY_SAMPLES_PASSED;
7567 break;
7568 case PIPE_QUERY_TIMESTAMP:
7569 q->gltype = GL_TIMESTAMP;
7570 break;
7571 case PIPE_QUERY_TIME_ELAPSED:
7572 q->gltype = GL_TIME_ELAPSED;
7573 break;
7574 case PIPE_QUERY_PRIMITIVES_GENERATED:
7575 q->gltype = GL_PRIMITIVES_GENERATED;
7576 break;
7577 case PIPE_QUERY_PRIMITIVES_EMITTED:
7578 q->gltype = GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
7579 break;
7580 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
7581 q->gltype = GL_ANY_SAMPLES_PASSED_CONSERVATIVE;
7582 break;
7583 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
7584 if (!has_feature(feat_transform_feedback_overflow_query))
7585 return EINVAL;
7586 q->gltype = GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB;
7587 break;
7588 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
7589 if (!has_feature(feat_transform_feedback_overflow_query))
7590 return EINVAL;
7591 q->gltype = GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB;
7592 break;
7593 default:
7594 fprintf(stderr,"unknown query object received %d\n", q->type);
7595 break;
7596 }
7597
7598 glGenQueries(1, &q->id);
7599
7600 ret_handle = vrend_renderer_object_insert(ctx, q, sizeof(struct vrend_query), handle,
7601 VIRGL_OBJECT_QUERY);
7602 if (!ret_handle) {
7603 FREE(q);
7604 return ENOMEM;
7605 }
7606 return 0;
7607 }
7608
7609 static void vrend_destroy_query(struct vrend_query *query)
7610 {
7611 vrend_resource_reference(&query->res, NULL);
7612 list_del(&query->waiting_queries);
7613 glDeleteQueries(1, &query->id);
7614 free(query);
7615 }
7616
7617 static void vrend_destroy_query_object(void *obj_ptr)
7618 {
7619 struct vrend_query *query = obj_ptr;
7620 vrend_destroy_query(query);
7621 }
7622
7623 int vrend_begin_query(struct vrend_context *ctx, uint32_t handle)
7624 {
7625 struct vrend_query *q;
7626
7627 q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
7628 if (!q)
7629 return EINVAL;
7630
7631 if (q->index > 0 && !has_feature(feat_transform_feedback3))
7632 return EINVAL;
7633
7634 if (q->gltype == GL_TIMESTAMP)
7635 return 0;
7636
7637 if (q->index > 0)
7638 glBeginQueryIndexed(q->gltype, q->index, q->id);
7639 else
7640 glBeginQuery(q->gltype, q->id);
7641 return 0;
7642 }
7643
7644 int vrend_end_query(struct vrend_context *ctx, uint32_t handle)
7645 {
7646 struct vrend_query *q;
7647 q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
7648 if (!q)
7649 return EINVAL;
7650
7651 if (q->index > 0 && !has_feature(feat_transform_feedback3))
7652 return EINVAL;
7653
7654 if (vrend_is_timer_query(q->gltype)) {
7655 if (vrend_state.use_gles && q->gltype == GL_TIMESTAMP) {
7656 report_gles_warn(ctx, GLES_WARN_TIMESTAMP, 0);
7657 } else if (q->gltype == GL_TIMESTAMP) {
7658 glQueryCounter(q->id, q->gltype);
7659 } else {
7660 /* remove from active query list for this context */
7661 glEndQuery(q->gltype);
7662 }
7663 return 0;
7664 }
7665
7666 if (q->index > 0)
7667 glEndQueryIndexed(q->gltype, q->index);
7668 else
7669 glEndQuery(q->gltype);
7670 return 0;
7671 }
7672
7673 void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
7674 UNUSED uint32_t wait)
7675 {
7676 struct vrend_query *q;
7677 bool ret;
7678
7679 q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
7680 if (!q)
7681 return;
7682
7683 ret = vrend_check_query(q);
7684 if (ret == false)
7685 list_addtail(&q->waiting_queries, &vrend_state.waiting_query_list);
7686 }
7687
7688 static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause)
7689 {
7690 if (pause) {
7691 if (ctx->sub->cond_render_q_id) {
7692 if (has_feature(feat_gl_conditional_render))
7693 glEndConditionalRender();
7694 else if (has_feature(feat_nv_conditional_render))
7695 glEndConditionalRenderNV();
7696 }
7697 } else {
7698 if (ctx->sub->cond_render_q_id) {
7699 if (has_feature(feat_gl_conditional_render))
7700 glBeginConditionalRender(ctx->sub->cond_render_q_id,
7701 ctx->sub->cond_render_gl_mode);
7702 else if (has_feature(feat_nv_conditional_render))
7703 glBeginConditionalRenderNV(ctx->sub->cond_render_q_id,
7704 ctx->sub->cond_render_gl_mode);
7705 }
7706 }
7707 }
7708
7709 void vrend_render_condition(struct vrend_context *ctx,
7710 uint32_t handle,
7711 bool condition,
7712 uint mode)
7713 {
7714 struct vrend_query *q;
7715 GLenum glmode = 0;
7716
7717 if (handle == 0) {
7718 if (has_feature(feat_gl_conditional_render))
7719 glEndConditionalRender();
7720 else if (has_feature(feat_nv_conditional_render))
7721 glEndConditionalRenderNV();
7722 ctx->sub->cond_render_q_id = 0;
7723 ctx->sub->cond_render_gl_mode = 0;
7724 return;
7725 }
7726
7727 q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
7728 if (!q)
7729 return;
7730
7731 if (condition && !has_feature(feat_conditional_render_inverted))
7732 return;
7733 switch (mode) {
7734 case PIPE_RENDER_COND_WAIT:
7735 glmode = condition ? GL_QUERY_WAIT_INVERTED : GL_QUERY_WAIT;
7736 break;
7737 case PIPE_RENDER_COND_NO_WAIT:
7738 glmode = condition ? GL_QUERY_NO_WAIT_INVERTED : GL_QUERY_NO_WAIT;
7739 break;
7740 case PIPE_RENDER_COND_BY_REGION_WAIT:
7741 glmode = condition ? GL_QUERY_BY_REGION_WAIT_INVERTED : GL_QUERY_BY_REGION_WAIT;
7742 break;
7743 case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
7744 glmode = condition ? GL_QUERY_BY_REGION_NO_WAIT_INVERTED : GL_QUERY_BY_REGION_NO_WAIT;
7745 break;
7746 default:
7747 fprintf(stderr, "unhandled condition %x\n", mode);
7748 }
7749
7750 ctx->sub->cond_render_q_id = q->id;
7751 ctx->sub->cond_render_gl_mode = glmode;
7752 if (has_feature(feat_gl_conditional_render))
7753 glBeginConditionalRender(q->id, glmode);
7754 if (has_feature(feat_nv_conditional_render))
7755 glBeginConditionalRenderNV(q->id, glmode);
7756 }
7757
7758 int vrend_create_so_target(struct vrend_context *ctx,
7759 uint32_t handle,
7760 uint32_t res_handle,
7761 uint32_t buffer_offset,
7762 uint32_t buffer_size)
7763 {
7764 struct vrend_so_target *target;
7765 struct vrend_resource *res;
7766 int ret_handle;
7767 res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
7768 if (!res) {
7769 report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
7770 return EINVAL;
7771 }
7772
7773 target = CALLOC_STRUCT(vrend_so_target);
7774 if (!target)
7775 return ENOMEM;
7776
7777 pipe_reference_init(&target->reference, 1);
7778 target->res_handle = res_handle;
7779 target->buffer_offset = buffer_offset;
7780 target->buffer_size = buffer_size;
7781 target->sub_ctx = ctx->sub;
7782 vrend_resource_reference(&target->buffer, res);
7783
7784 ret_handle = vrend_renderer_object_insert(ctx, target, sizeof(*target), handle,
7785 VIRGL_OBJECT_STREAMOUT_TARGET);
7786 if (ret_handle == 0) {
7787 FREE(target);
7788 return ENOMEM;
7789 }
7790 return 0;
7791 }
7792
7793 static void vrender_get_glsl_version(int *glsl_version)
7794 {
7795 int major_local, minor_local;
7796 const GLubyte *version_str;
7797 int c;
7798 int version;
7799
7800 version_str = glGetString(GL_SHADING_LANGUAGE_VERSION);
7801 if (vrend_state.use_gles) {
7802 char tmp[20];
7803 c = sscanf((const char *)version_str, "%s %s %s %s %i.%i",
7804 tmp, tmp, tmp, tmp, &major_local, &minor_local);
7805 assert(c == 6);
7806 } else {
7807 c = sscanf((const char *)version_str, "%i.%i",
7808 &major_local, &minor_local);
7809 assert(c == 2);
7810 }
7811
7812 version = (major_local * 100) + minor_local;
7813 if (glsl_version)
7814 *glsl_version = version;
7815 }
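
/* Parsing example (illustrative strings): a GLES driver typically
 * reports "OpenGL ES GLSL ES 3.10", which the "%s %s %s %s %i.%i" scan
 * splits into major 3 / minor 10, i.e. version 310; a desktop driver
 * reports e.g. "4.50", giving 450. */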
7816
7817 static void vrend_fill_caps_glsl_version(int gl_ver, int gles_ver,
7818 union virgl_caps *caps)
7819 {
7820 if (gles_ver > 0) {
7821 caps->v1.glsl_level = 120;
7822
7823 if (gles_ver >= 31)
7824 caps->v1.glsl_level = 310;
7825 else if (gles_ver >= 30)
7826 caps->v1.glsl_level = 130;
7827 }
7828
7829 if (gl_ver > 0) {
7830 caps->v1.glsl_level = 130;
7831
7832 if (gl_ver == 31)
7833 caps->v1.glsl_level = 140;
7834 else if (gl_ver == 32)
7835 caps->v1.glsl_level = 150;
7836 else if (gl_ver == 33)
7837 caps->v1.glsl_level = 330;
7838 else if (gl_ver == 40)
7839 caps->v1.glsl_level = 400;
7840 else if (gl_ver == 41)
7841 caps->v1.glsl_level = 410;
7842 else if (gl_ver == 42)
7843 caps->v1.glsl_level = 420;
7844 else if (gl_ver >= 43)
7845 caps->v1.glsl_level = 430;
7846 }
7847 }
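
/* Resulting glsl_level mapping (summary of the branches above):
 *   GLES: 2.0 -> 120, 3.0 -> 130, 3.1 -> 310
 *   GL:   3.0 -> 130, 3.1 -> 140, 3.2 -> 150, 3.3 -> 330,
 *         4.0 -> 400, 4.1 -> 410, 4.2 -> 420, >= 4.3 -> 430
 * When both a GL and a GLES version are reported, the GL branch runs
 * last and wins. */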
7848
7849 /*
7850  * Does all of the common caps setting
7851  * shared between GL and GLES.
7852  */
7853 static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_caps *caps)
7854 {
7855 int i;
7856 GLint max;
7857
7858 /*
7859 * We can't fully support this feature on GLES,
7860 * but it is needed for OpenGL 2.1 so lie.
7861 */
7862 caps->v1.bset.occlusion_query = 1;
7863
7864 /* Set supported prims here as we now know what shaders we support. */
7865 caps->v1.prim_mask = (1 << PIPE_PRIM_POINTS) | (1 << PIPE_PRIM_LINES) |
7866 (1 << PIPE_PRIM_LINE_STRIP) | (1 << PIPE_PRIM_LINE_LOOP) |
7867 (1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) |
7868 (1 << PIPE_PRIM_TRIANGLE_FAN);
7869
7870 if (gl_ver > 0 && !vrend_state.use_core_profile) {
7871 caps->v1.bset.poly_stipple = 1;
7872 caps->v1.bset.color_clamping = 1;
7873 caps->v1.prim_mask |= (1 << PIPE_PRIM_QUADS) |
7874 (1 << PIPE_PRIM_QUAD_STRIP) |
7875 (1 << PIPE_PRIM_POLYGON);
7876 }
7877
7878 if (caps->v1.glsl_level >= 150) {
7879 caps->v1.prim_mask |= (1 << PIPE_PRIM_LINES_ADJACENCY) |
7880 (1 << PIPE_PRIM_LINE_STRIP_ADJACENCY) |
7881 (1 << PIPE_PRIM_TRIANGLES_ADJACENCY) |
7882 (1 << PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);
7883 }
7884 if (caps->v1.glsl_level >= 400)
7885 caps->v1.prim_mask |= (1 << PIPE_PRIM_PATCHES);
7886
7887 if (epoxy_has_gl_extension("GL_ARB_vertex_type_10f_11f_11f_rev")) {
7888 int val = VIRGL_FORMAT_R11G11B10_FLOAT;
7889 uint32_t offset = val / 32;
7890 uint32_t index = val % 32;
7891 caps->v1.vertexbuffer.bitmask[offset] |= (1 << index);
7892 }
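   /* Bitmask layout used by the caps vectors (illustrative): a format
    * with enum value v occupies bit (v % 32) of word (v / 32), so e.g.
    * v = 70 lands in word 2, bit 6. */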
7893
7894 if (has_feature(feat_nv_conditional_render) ||
7895 has_feature(feat_gl_conditional_render))
7896 caps->v1.bset.conditional_render = 1;
7897
7898 if (has_feature(feat_indep_blend))
7899 caps->v1.bset.indep_blend_enable = 1;
7900
7901 if (has_feature(feat_draw_instance))
7902 caps->v1.bset.instanceid = 1;
7903
7904 if (has_feature(feat_ubo)) {
7905 glGetIntegerv(GL_MAX_VERTEX_UNIFORM_BLOCKS, &max);
7906 vrend_state.max_uniform_blocks = max;
7907 caps->v1.max_uniform_blocks = max + 1;
7908 }
7909
7910 if (gl_ver >= 32) {
7911 caps->v1.bset.fragment_coord_conventions = 1;
7912 caps->v1.bset.depth_clip_disable = 1;
7913 caps->v1.bset.seamless_cube_map = 1;
7914 } else {
7915 if (epoxy_has_gl_extension("GL_ARB_fragment_coord_conventions"))
7916 caps->v1.bset.fragment_coord_conventions = 1;
7917 if (epoxy_has_gl_extension("GL_ARB_seamless_cube_map"))
7918 caps->v1.bset.seamless_cube_map = 1;
7919 }
7920
7921 if (epoxy_has_gl_extension("GL_AMD_seamless_cube_map_per_texture")) {
7922 caps->v1.bset.seamless_cube_map_per_texture = 1;
7923 }
7924
7925 if (has_feature(feat_texture_multisample))
7926 caps->v1.bset.texture_multisample = 1;
7927
7928 if (has_feature(feat_tessellation))
7929 caps->v1.bset.has_tessellation_shaders = 1;
7930
7931 if (has_feature(feat_sample_shading))
7932 caps->v1.bset.has_sample_shading = 1;
7933
7934 if (has_feature(feat_indirect_draw))
7935 caps->v1.bset.has_indirect_draw = 1;
7936
7937 if (has_feature(feat_indep_blend_func))
7938 caps->v1.bset.indep_blend_func = 1;
7939
7940 if (has_feature(feat_cube_map_array))
7941 caps->v1.bset.cube_map_array = 1;
7942
7943 if (gl_ver >= 40) {
7944 caps->v1.bset.texture_query_lod = 1;
7945 caps->v1.bset.has_fp64 = 1;
7946 } else {
7947 if (epoxy_has_gl_extension("GL_ARB_texture_query_lod"))
7948 caps->v1.bset.texture_query_lod = 1;
7949 /* need gpu shader 5 for bitfield insert */
7950 if (epoxy_has_gl_extension("GL_ARB_gpu_shader_fp64") &&
7951 epoxy_has_gl_extension("GL_ARB_gpu_shader5"))
7952 caps->v1.bset.has_fp64 = 1;
7953 }
7954
7955 if (has_feature(feat_base_instance))
7956 caps->v1.bset.start_instance = 1;
7957
7958 if (epoxy_has_gl_extension("GL_ARB_shader_stencil_export")) {
7959 caps->v1.bset.shader_stencil_export = 1;
7960 }
7961
7962 if (has_feature(feat_conditional_render_inverted))
7963 caps->v1.bset.conditional_render_inverted = 1;
7964
7965 if (gl_ver >= 45) {
7966 caps->v1.bset.has_cull = 1;
7967 caps->v1.bset.derivative_control = 1;
7968 } else {
7969 if (epoxy_has_gl_extension("GL_ARB_cull_distance"))
7970 caps->v1.bset.has_cull = 1;
7971 if (epoxy_has_gl_extension("GL_ARB_derivative_control"))
7972 caps->v1.bset.derivative_control = 1;
7973 }
7974
7975 if (has_feature(feat_polygon_offset_clamp))
7976 caps->v1.bset.polygon_offset_clamp = 1;
7977
7978 if (has_feature(feat_transform_feedback_overflow_query))
7979 caps->v1.bset.transform_feedback_overflow_query = 1;
7980
7981 if (epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp") ||
7982 epoxy_has_gl_extension("GL_ARB_texture_mirror_clamp_to_edge")) {
7983 caps->v1.bset.mirror_clamp = true;
7984 }
7985
7986 if (has_feature(feat_texture_array)) {
7987 glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max);
7988 caps->v1.max_texture_array_layers = max;
7989 }
7990
7991 /* we need tf3 so we can do gallium skip buffers */
7992 if (has_feature(feat_transform_feedback)) {
7993 if (has_feature(feat_transform_feedback2))
7994 caps->v1.bset.streamout_pause_resume = 1;
7995
7996 if (has_feature(feat_transform_feedback3)) {
7997 glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_BUFFERS, &max);
7998 caps->v1.max_streamout_buffers = max;
7999 } else if (gles_ver > 0) {
8000 glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS, &max);
8001          /* As with the earlier version of transform feedback, the minimum here is 4. */
8002 if (max >= 4) {
8003 caps->v1.max_streamout_buffers = 4;
8004 }
8005 } else
8006 caps->v1.max_streamout_buffers = 4;
8007 }
8008
8009 if (has_feature(feat_dual_src_blend)) {
8010 glGetIntegerv(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS, &max);
8011 caps->v1.max_dual_source_render_targets = max;
8012 }
8013
8014 if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
8015 glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max);
8016 caps->v1.max_tbo_size = max;
8017 }
8018
8019 if (has_feature(feat_texture_gather)) {
8020 if (gl_ver > 0) {
8021 glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB, &max);
8022 caps->v1.max_texture_gather_components = max;
8023 } else {
8024 caps->v1.max_texture_gather_components = 4;
8025 }
8026 }
8027
8028 if (has_feature(feat_viewport_array)) {
8029 glGetIntegerv(GL_MAX_VIEWPORTS, &max);
8030 caps->v1.max_viewports = max;
8031 } else {
8032 caps->v1.max_viewports = 1;
8033 }
8034
8035 /* Common limits for all backends. */
8036 caps->v1.max_render_targets = vrend_state.max_draw_buffers;
8037
8038 glGetIntegerv(GL_MAX_SAMPLES, &max);
8039 caps->v1.max_samples = max;
8040
8041 /* All of the formats are common. */
8042 for (i = 0; i < VIRGL_FORMAT_MAX; i++) {
8043 uint32_t offset = i / 32;
8044 uint32_t index = i % 32;
8045
8046 if (tex_conv_table[i].internalformat != 0) {
8047 if (vrend_format_can_sample(i)) {
8048 caps->v1.sampler.bitmask[offset] |= (1 << index);
8049 if (vrend_format_can_render(i))
8050 caps->v1.render.bitmask[offset] |= (1 << index);
8051 }
8052 }
8053 }
8054
8055 /* These are filled in by the init code, so are common. */
8056 if (has_feature(feat_nv_prim_restart) ||
8057 has_feature(feat_gl_prim_restart)) {
8058 caps->v1.bset.primitive_restart = 1;
8059 }
8060 }
8061
8062 static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_caps *caps)
8063 {
8064 GLint max;
8065 GLfloat range[2];
8066
8067 glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, range);
8068 caps->v2.min_aliased_point_size = range[0];
8069 caps->v2.max_aliased_point_size = range[1];
8070
8071 glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, range);
8072 caps->v2.min_aliased_line_width = range[0];
8073 caps->v2.max_aliased_line_width = range[1];
8074
8075 if (gl_ver > 0) {
8076 glGetFloatv(GL_SMOOTH_POINT_SIZE_RANGE, range);
8077 caps->v2.min_smooth_point_size = range[0];
8078 caps->v2.max_smooth_point_size = range[1];
8079
8080 glGetFloatv(GL_SMOOTH_LINE_WIDTH_RANGE, range);
8081 caps->v2.min_smooth_line_width = range[0];
8082 caps->v2.max_smooth_line_width = range[1];
8083 }
8084
8085 glGetFloatv(GL_MAX_TEXTURE_LOD_BIAS, &caps->v2.max_texture_lod_bias);
8086 glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, (GLint*)&caps->v2.max_vertex_attribs);
8087 glGetIntegerv(GL_MAX_VERTEX_OUTPUT_COMPONENTS, &max);
8088 caps->v2.max_vertex_outputs = max / 4;
8089
8090 glGetIntegerv(GL_MIN_PROGRAM_TEXEL_OFFSET, &caps->v2.min_texel_offset);
8091 glGetIntegerv(GL_MAX_PROGRAM_TEXEL_OFFSET, &caps->v2.max_texel_offset);
8092
8093 glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.uniform_buffer_offset_alignment);
8094
8095 glGetIntegerv(GL_MAX_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_2d_size);
8096 glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_3d_size);
8097 glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_cube_size);
8098
8099 if (has_feature(feat_geometry_shader)) {
8100 glGetIntegerv(GL_MAX_GEOMETRY_OUTPUT_VERTICES, (GLint*)&caps->v2.max_geom_output_vertices);
8101 glGetIntegerv(GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS, (GLint*)&caps->v2.max_geom_total_output_components);
8102 }
8103
8104 if (has_feature(feat_tessellation)) {
8105 glGetIntegerv(GL_MAX_TESS_PATCH_COMPONENTS, &max);
8106 caps->v2.max_shader_patch_varyings = max / 4;
8107 } else
8108 caps->v2.max_shader_patch_varyings = 0;
8109
8110 if (has_feature(feat_texture_gather)) {
8111 glGetIntegerv(GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.min_texture_gather_offset);
8112 glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.max_texture_gather_offset);
8113 }
8114
8115 if (gl_ver >= 43) {
8116 glGetIntegerv(GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.texture_buffer_offset_alignment);
8117 }
8118
8119 if (has_feature(feat_ssbo)) {
8120 glGetIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.shader_buffer_offset_alignment);
8121
8122 glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &max);
8123 if (max > PIPE_MAX_SHADER_BUFFERS)
8124 max = PIPE_MAX_SHADER_BUFFERS;
8125 caps->v2.max_shader_buffer_other_stages = max;
8126 glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &max);
8127 if (max > PIPE_MAX_SHADER_BUFFERS)
8128 max = PIPE_MAX_SHADER_BUFFERS;
8129 caps->v2.max_shader_buffer_frag_compute = max;
8130 }
8131
8132 if (has_feature(feat_images)) {
8133 glGetIntegerv(GL_MAX_VERTEX_IMAGE_UNIFORMS, &max);
8134 if (max > PIPE_MAX_SHADER_IMAGES)
8135 max = PIPE_MAX_SHADER_IMAGES;
8136 caps->v2.max_shader_image_other_stages = max;
8137 glGetIntegerv(GL_MAX_FRAGMENT_IMAGE_UNIFORMS, &max);
8138 if (max > PIPE_MAX_SHADER_IMAGES)
8139 max = PIPE_MAX_SHADER_IMAGES;
8140 caps->v2.max_shader_image_frag_compute = max;
8141
8142 glGetIntegerv(GL_MAX_IMAGE_SAMPLES, (GLint*)&caps->v2.max_image_samples);
8143 }
8144
8145 if (has_feature(feat_storage_multisample))
8146 caps->v1.max_samples = vrend_renderer_query_multisample_caps(caps->v1.max_samples, &caps->v2);
8147
8148 caps->v2.capability_bits |= VIRGL_CAP_TGSI_INVARIANT | VIRGL_CAP_SET_MIN_SAMPLES | VIRGL_CAP_TGSI_PRECISE;
8149
8150 if (gl_ver >= 44 || gles_ver >= 31)
8151 glGetIntegerv(GL_MAX_VERTEX_ATTRIB_STRIDE, (GLint*)&caps->v2.max_vertex_attrib_stride);
8152
8153 if (has_feature(feat_compute_shader)) {
8154 glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, (GLint*)&caps->v2.max_compute_work_group_invocations);
8155 glGetIntegerv(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE, (GLint*)&caps->v2.max_compute_shared_memory_size);
8156 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, (GLint*)&caps->v2.max_compute_grid_size[0]);
8157 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 1, (GLint*)&caps->v2.max_compute_grid_size[1]);
8158 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 2, (GLint*)&caps->v2.max_compute_grid_size[2]);
8159 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 0, (GLint*)&caps->v2.max_compute_block_size[0]);
8160 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 1, (GLint*)&caps->v2.max_compute_block_size[1]);
8161 glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 2, (GLint*)&caps->v2.max_compute_block_size[2]);
8162
8163 caps->v2.capability_bits |= VIRGL_CAP_COMPUTE_SHADER;
8164 }
8165
8166 if (has_feature(feat_fb_no_attach))
8167 caps->v2.capability_bits |= VIRGL_CAP_FB_NO_ATTACH;
8168
8169 if (has_feature(feat_texture_view))
8170 caps->v2.capability_bits |= VIRGL_CAP_TEXTURE_VIEW;
8171
8172 if (has_feature(feat_txqs))
8173 caps->v2.capability_bits |= VIRGL_CAP_TXQS;
8174
8175 if (has_feature(feat_barrier))
8176 caps->v2.capability_bits |= VIRGL_CAP_MEMORY_BARRIER;
8177
8178 if (has_feature(feat_copy_image))
8179 caps->v2.capability_bits |= VIRGL_CAP_COPY_IMAGE;
8180
8181 if (has_feature(feat_robust_buffer_access))
8182 caps->v2.capability_bits |= VIRGL_CAP_ROBUST_BUFFER_ACCESS;
8183
8184 if (has_feature(feat_framebuffer_fetch))
8185 caps->v2.capability_bits |= VIRGL_CAP_TGSI_FBFETCH;
8186
8187 if (has_feature(feat_shader_clock))
8188 caps->v2.capability_bits |= VIRGL_CAP_SHADER_CLOCK;
8189
8190 if (has_feature(feat_texture_barrier))
8191 caps->v2.capability_bits |= VIRGL_CAP_TEXTURE_BARRIER;
8192 }
8193
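/* Fill the caps structure for the requested capset.  Set 1 fills only the
 * v1 struct, set 2 fills both v1 and v2; any higher set is unknown and
 * reports max_version = 0.
 *
 * A minimal caller-side sketch (illustrative only, not taken from this
 * file): query the set size first, then fill it.
 *
 *    uint32_t max_ver, max_size;
 *    vrend_renderer_get_cap_set(VREND_CAP_SET, &max_ver, &max_size);
 *    union virgl_caps *caps = calloc(1, sizeof(*caps));
 *    vrend_renderer_fill_caps(VREND_CAP_SET, max_ver, caps);
 */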
void vrend_renderer_fill_caps(uint32_t set, UNUSED uint32_t version,
                              union virgl_caps *caps)
{
   int gl_ver, gles_ver;
   bool fill_capset2 = false;

   if (!caps)
      return;

   if (set > 2) {
      caps->max_version = 0;
      return;
   }

   if (set == 1) {
      memset(caps, 0, sizeof(struct virgl_caps_v1));
      caps->max_version = 1;
   } else if (set == 2) {
      memset(caps, 0, sizeof(*caps));
      caps->max_version = 2;
      fill_capset2 = true;
   }

   if (vrend_state.use_gles) {
      gles_ver = epoxy_gl_version();
      gl_ver = 0;
   } else {
      gles_ver = 0;
      gl_ver = epoxy_gl_version();
   }

   vrend_fill_caps_glsl_version(gl_ver, gles_ver, caps);
   vrend_renderer_fill_caps_v1(gl_ver, gles_ver, caps);

   if (!fill_capset2)
      return;

   vrend_renderer_fill_caps_v2(gl_ver, gles_ver, caps);
}

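/* Return the current GL time in nanoseconds, as reported by GL_TIMESTAMP. */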
GLint64 vrend_renderer_get_timestamp(void)
{
   GLint64 v;
   glGetInteger64v(GL_TIMESTAMP, &v);
   return v;
}

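/* Read back the contents of a small (at most 128x128) 2D cursor resource.
 * Returns a freshly allocated, vertically flipped copy of the pixels that
 * the caller must free(), or NULL on failure. */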
void *vrend_renderer_get_cursor_contents(uint32_t res_handle, uint32_t *width, uint32_t *height)
{
   GLenum format, type;
   struct vrend_resource *res;
   int blsize;
   char *data, *data2;
   int size;
   uint h;

   res = vrend_resource_lookup(res_handle, 0);
   if (!res)
      return NULL;

   if (res->base.width0 > 128 || res->base.height0 > 128)
      return NULL;

   if (res->target != GL_TEXTURE_2D)
      return NULL;

   if (width)
      *width = res->base.width0;
   if (height)
      *height = res->base.height0;
   format = tex_conv_table[res->base.format].glformat;
   type = tex_conv_table[res->base.format].gltype;
   blsize = util_format_get_blocksize(res->base.format);
   size = util_format_get_nblocks(res->base.format, res->base.width0, res->base.height0) * blsize;
   data = malloc(size);
   data2 = malloc(size);

   if (!data || !data2) {
      free(data);
      free(data2);
      return NULL;
   }

   if (has_feature(feat_arb_robustness)) {
      glBindTexture(res->target, res->id);
      glGetnTexImageARB(res->target, 0, format, type, size, data);
   } else if (vrend_state.use_gles) {
      GLuint fb_id;

      if (res->readback_fb_id == 0 || res->readback_fb_level != 0 || res->readback_fb_z != 0) {

         if (res->readback_fb_id)
            glDeleteFramebuffers(1, &res->readback_fb_id);

         glGenFramebuffers(1, &fb_id);
         glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_id);

         vrend_fb_bind_texture(res, 0, 0, 0);

         res->readback_fb_id = fb_id;
         res->readback_fb_level = 0;
         res->readback_fb_z = 0;
      } else {
         glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, res->readback_fb_id);
      }

      /* read from the resource dimensions - the width/height out-parameters
       * may legally be NULL, so they must not be dereferenced here */
      if (has_feature(feat_arb_robustness)) {
         glReadnPixelsARB(0, 0, res->base.width0, res->base.height0, format, type, size, data);
      } else if (has_feature(feat_gles_khr_robustness)) {
         glReadnPixelsKHR(0, 0, res->base.width0, res->base.height0, format, type, size, data);
      } else {
         glReadPixels(0, 0, res->base.width0, res->base.height0, format, type, data);
      }

   } else {
      glBindTexture(res->target, res->id);
      glGetTexImage(res->target, 0, format, type, data);
   }

   for (h = 0; h < res->base.height0; h++) {
      uint32_t doff = (res->base.height0 - h - 1) * res->base.width0 * blsize;
      uint32_t soff = h * res->base.width0 * blsize;

      memcpy(data2 + doff, data + soff, res->base.width0 * blsize);
   }
   free(data);

   return data2;
}

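/* Force a switch back to context 0: clear the cached current context so
 * the next vrend_hw_switch_context() really rebinds everything. */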
void vrend_renderer_force_ctx_0(void)
{
   struct vrend_context *ctx0 = vrend_lookup_renderer_ctx(0);
   vrend_state.current_ctx = NULL;
   vrend_state.current_hw_ctx = NULL;
   vrend_hw_switch_context(ctx0, true);
   vrend_clicbs->make_current(0, ctx0->sub->gl_context);
}

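/* Read back a rectangle (x, y, width, height) of a resource into the
 * supplied iovecs by issuing a read transfer. */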
void vrend_renderer_get_rect(int res_handle, struct iovec *iov, unsigned int num_iovs,
                             uint32_t offset, int x, int y, int width, int height)
{
   struct vrend_resource *res = vrend_resource_lookup(res_handle, 0);
   struct vrend_transfer_info transfer_info;
   struct pipe_box box;
   int elsize;

   /* bail out if the handle doesn't name a valid resource */
   if (!res)
      return;

   memset(&transfer_info, 0, sizeof(transfer_info));

   elsize = util_format_get_blocksize(res->base.format);
   box.x = x;
   box.y = y;
   box.z = 0;
   box.width = width;
   box.height = height;
   box.depth = 1;

   transfer_info.box = &box;

   transfer_info.stride = util_format_get_nblocksx(res->base.format, res->base.width0) * elsize;
   transfer_info.offset = offset;
   transfer_info.handle = res->handle;
   transfer_info.iovec = iov;
   transfer_info.iovec_cnt = num_iovs;
   vrend_renderer_transfer_iov(&transfer_info, VREND_TRANSFER_READ);
}

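/* Attach a globally created resource to a context: insert it into the
 * per-context resource hash so the context can reference it by handle. */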
void vrend_renderer_attach_res_ctx(int ctx_id, int resource_id)
{
   struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
   struct vrend_resource *res;

   if (!ctx)
      return;

   res = vrend_resource_lookup(resource_id, 0);
   if (!res)
      return;

   vrend_object_insert_nofree(ctx->res_hash, res, sizeof(*res), resource_id, 1, false);
}

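/* Detach a resource from a context: drop it from the per-context hash.
 * The global resource itself stays alive. */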
static void vrend_renderer_detach_res_ctx_p(struct vrend_context *ctx, int res_handle)
{
   struct vrend_resource *res;
   res = vrend_object_lookup(ctx->res_hash, res_handle, 1);
   if (!res)
      return;

   vrend_object_remove(ctx->res_hash, res_handle, 1);
}

void vrend_renderer_detach_res_ctx(int ctx_id, int res_handle)
{
   struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
   if (!ctx)
      return;
   vrend_renderer_detach_res_ctx_p(ctx, res_handle);
}

static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
{
   struct vrend_resource *res = vrend_object_lookup(ctx->res_hash, res_handle, 1);

   return res;
}

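/* Export basic layout information about a resource (dimensions, format,
 * stride and y-orientation).  Returns 0 on success or EINVAL if info is
 * NULL or the handle is invalid. */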
int vrend_renderer_resource_get_info(int res_handle,
                                     struct vrend_renderer_resource_info *info)
{
   struct vrend_resource *res;
   int elsize;

   if (!info)
      return EINVAL;
   res = vrend_resource_lookup(res_handle, 0);
   if (!res)
      return EINVAL;

   elsize = util_format_get_blocksize(res->base.format);

   info->handle = res_handle;
   info->tex_id = res->id;
   info->width = res->base.width0;
   info->height = res->base.height0;
   info->depth = res->base.depth0;
   info->format = res->base.format;
   info->flags = res->y_0_top ? VIRGL_RESOURCE_Y_0_TOP : 0;
   info->stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, 0)) * elsize;

   return 0;
}

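/* Report the maximum version and byte size of a capability set, so the
 * caller can size the buffer it passes to vrend_renderer_fill_caps(). */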
void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
                                uint32_t *max_size)
{
   switch (cap_set) {
   case VREND_CAP_SET:
      *max_ver = 1;
      *max_size = sizeof(struct virgl_caps_v1);
      break;
   case VREND_CAP_SET2:
      /* we should never need to increase this - it should be possible to just grow virgl_caps */
      *max_ver = 2;
      *max_size = sizeof(struct virgl_caps_v2);
      break;
   default:
      *max_ver = 0;
      *max_size = 0;
      break;
   }
}

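/* Create a GL sub-context for ctx (a no-op if sub_ctx_id already exists),
 * make it current and initialize per-sub-context GL state: the debug
 * callback, viewport depth ranges, a VAO when vertex attrib binding is
 * unavailable, and the draw/blit framebuffers. */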
void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
   struct vrend_sub_context *sub;
   struct virgl_gl_ctx_param ctx_params;
   GLuint i;

   LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
      if (sub->sub_ctx_id == sub_ctx_id) {
         return;
      }
   }

   sub = CALLOC_STRUCT(vrend_sub_context);
   if (!sub)
      return;

   ctx_params.shared = (ctx->ctx_id == 0 && sub_ctx_id == 0) ? false : true;
   ctx_params.major_ver = vrend_state.gl_major_ver;
   ctx_params.minor_ver = vrend_state.gl_minor_ver;
   sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
   vrend_clicbs->make_current(0, sub->gl_context);

   /* enable debug output on this context if vrend_renderer_init enabled it as well */
   if (has_feature(feat_debug_cb)) {
      glDebugMessageCallback(vrend_debug_cb, NULL);
      glEnable(GL_DEBUG_OUTPUT);
      glDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
   }

   sub->sub_ctx_id = sub_ctx_id;

   /* initialize the depth far_val to 1 */
   for (i = 0; i < PIPE_MAX_VIEWPORTS; i++) {
      sub->vps[i].far_val = 1.0;
   }

   if (!has_feature(feat_gles31_vertex_attrib_binding)) {
      glGenVertexArrays(1, &sub->vaoid);
      glBindVertexArray(sub->vaoid);
   }

   glGenFramebuffers(1, &sub->fb_id);
   glGenFramebuffers(2, sub->blit_fb_ids);

   list_inithead(&sub->programs);
   list_inithead(&sub->streamout_list);

   sub->object_hash = vrend_object_init_ctx_table();

   ctx->sub = sub;
   list_add(&sub->head, &ctx->sub_ctxs);
   if (sub_ctx_id == 0)
      ctx->sub0 = sub;
}

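/* Destroy a sub-context.  Sub-context 0 can never be destroyed; if the
 * sub-context being destroyed is current, switch back to sub-context 0
 * first. */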
void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
   struct vrend_sub_context *sub, *tofree = NULL;

   /* never destroy sub context id 0 */
   if (sub_ctx_id == 0)
      return;

   LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
      if (sub->sub_ctx_id == sub_ctx_id) {
         tofree = sub;
      }
   }

   if (tofree) {
      if (ctx->sub == tofree) {
         ctx->sub = ctx->sub0;
         vrend_clicbs->make_current(0, ctx->sub->gl_context);
      }
      vrend_destroy_sub_context(tofree);
   }
}

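/* Make the given sub-context current; a no-op if it is already current
 * or does not exist. */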
void vrend_renderer_set_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
   struct vrend_sub_context *sub;

   if (ctx->sub && ctx->sub->sub_ctx_id == sub_ctx_id)
      return;

   /* find the sub ctx */
   LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
      if (sub->sub_ctx_id == sub_ctx_id) {
         ctx->sub = sub;
         vrend_clicbs->make_current(0, sub->gl_context);
         break;
      }
   }
}

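/* Drop all pending fences, holding the fence mutex when a sync thread is
 * running. */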
static void vrend_reset_fences(void)
{
   struct vrend_fence *fence, *stor;

   if (vrend_state.sync_thread)
      pipe_mutex_lock(vrend_state.fence_mutex);

   LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
      free_fence_locked(fence);
   }

   if (vrend_state.sync_thread)
      pipe_mutex_unlock(vrend_state.fence_mutex);
}

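/* Reset the renderer to a clean state: stop the sync thread, drop pending
 * fences, tear down the decoder state and resource table, reinitialize
 * them, and recreate context 0. */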
void vrend_renderer_reset(void)
{
   if (vrend_state.sync_thread) {
      vrend_free_sync_thread();
      vrend_state.stop_sync_thread = false;
   }
   vrend_reset_fences();
   vrend_decode_reset(false);
   vrend_object_fini_resource_table();
   vrend_decode_reset(true);
   vrend_object_init_resource_table();
   vrend_renderer_context_create_internal(0, 0, NULL);
}

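/* Return the eventfd the caller can poll for fence completion, or -1 if
 * the renderer has not been initialized. */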
int vrend_renderer_get_poll_fd(void)
{
   if (!vrend_state.inited)
      return -1;

   return vrend_state.eventfd;
}
