1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "util/u_memory.h"
24 #include "util/format/u_format.h"
25 #include "util/format/u_format_s3tc.h"
26 #include "util/u_screen.h"
27 #include "util/u_video.h"
28 #include "util/u_math.h"
29 #include "util/u_inlines.h"
30 #include "util/os_time.h"
31 #include "util/xmlconfig.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_screen.h"
34 #include "nir/nir_to_tgsi.h"
35 #include "vl/vl_decoder.h"
36 #include "vl/vl_video_buffer.h"
37
38 #include "virgl_screen.h"
39 #include "virgl_resource.h"
40 #include "virgl_public.h"
41 #include "virgl_context.h"
42 #include "virgl_encode.h"
43
44 int virgl_debug = 0;
45 const struct debug_named_value virgl_debug_options[] = {
46 { "verbose", VIRGL_DEBUG_VERBOSE, NULL },
47 { "tgsi", VIRGL_DEBUG_TGSI, NULL },
48 { "noemubgra", VIRGL_DEBUG_NO_EMULATE_BGRA, "Disable tweak to emulate BGRA as RGBA on GLES hosts" },
49 { "nobgraswz", VIRGL_DEBUG_NO_BGRA_DEST_SWIZZLE, "Disable tweak to swizzle emulated BGRA on GLES hosts" },
50 { "sync", VIRGL_DEBUG_SYNC, "Sync after every flush" },
51 { "xfer", VIRGL_DEBUG_XFER, "Do not optimize for transfers" },
52 { "r8srgb-readback", VIRGL_DEBUG_L8_SRGB_ENABLE_READBACK, "Enable redaback for L8 sRGB textures" },
53 { "nocoherent", VIRGL_DEBUG_NO_COHERENT, "Disable coherent memory" },
54 { "video", VIRGL_DEBUG_VIDEO, "Video codec" },
55 { "shader_sync", VIRGL_DEBUG_SHADER_SYNC, "Sync after every shader link" },
56 DEBUG_NAMED_VALUE_END
57 };
58 DEBUG_GET_ONCE_FLAGS_OPTION(virgl_debug, "VIRGL_DEBUG", virgl_debug_options, 0)
59
/* pipe_screen::get_vendor callback — the driver vendor is always Mesa. */
static const char *
virgl_get_vendor(struct pipe_screen *screen)
{
   (void)screen; /* unused */
   return "Mesa";
}
65
66
67 static const char *
virgl_get_name(struct pipe_screen * screen)68 virgl_get_name(struct pipe_screen *screen)
69 {
70 struct virgl_screen *vscreen = virgl_screen(screen);
71 if (vscreen->caps.caps.v2.host_feature_check_version >= 5)
72 return vscreen->caps.caps.v2.renderer;
73
74 return "virgl";
75 }
76
/* Look up a per-shader-stage entry in a v2 capability array, converting
 * the gallium pipe_shader_type to the virgl stage index first. Expects a
 * local "vscreen" (struct virgl_screen *) to be in scope at the use site. */
#define VIRGL_SHADER_STAGE_CAP_V2(CAP, STAGE) \
   vscreen->caps.caps.v2. CAP[virgl_shader_stage_convert(STAGE)]
79
/**
 * pipe_screen::get_shader_param callback — per-stage shader limits.
 *
 * Returns 0 for any stage the host did not advertise (tessellation,
 * compute) and for unhandled caps; otherwise translates the host's
 * virgl caps into gallium shader-cap values.
 */
static int
virgl_get_shader_param(struct pipe_screen *screen,
                       enum pipe_shader_type shader,
                       enum pipe_shader_cap param)
{
   struct virgl_screen *vscreen = virgl_screen(screen);

   /* Tessellation stages exist only when the host advertises them. */
   if ((shader == PIPE_SHADER_TESS_CTRL || shader == PIPE_SHADER_TESS_EVAL) &&
       !vscreen->caps.caps.v1.bset.has_tessellation_shaders)
      return 0;

   /* Likewise compute shaders. */
   if (shader == PIPE_SHADER_COMPUTE &&
       !(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_COMPUTE_SHADER))
      return 0;

   switch(shader)
   {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_TESS_EVAL:
   case PIPE_SHADER_COMPUTE:
      switch (param) {
      case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
      case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
      case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
      case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
         /* No instruction-count limits are enforced on the guest side. */
         return INT_MAX;
      case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
         return 1;
      case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
         return vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_INDIRECT_INPUT_ADDR;
      case PIPE_SHADER_CAP_MAX_INPUTS:
         /* Pre-GLSL-1.50 hosts: every stage shares the vertex-attrib limit. */
         if (vscreen->caps.caps.v1.glsl_level < 150)
            return vscreen->caps.caps.v2.max_vertex_attribs;
         return (shader == PIPE_SHADER_VERTEX ||
                 shader == PIPE_SHADER_GEOMETRY) ? vscreen->caps.caps.v2.max_vertex_attribs : 32;
      case PIPE_SHADER_CAP_MAX_OUTPUTS:
         switch (shader) {
         case PIPE_SHADER_FRAGMENT:
            return vscreen->caps.caps.v1.max_render_targets;
         case PIPE_SHADER_TESS_CTRL:
            /* Dedicated TCS/TES output counts only exist on newer hosts;
             * otherwise fall back to the generic vertex-output limit. */
            if (vscreen->caps.caps.v2.host_feature_check_version >= 19)
               return vscreen->caps.caps.v2.max_tcs_outputs;
            FALLTHROUGH;
         case PIPE_SHADER_TESS_EVAL:
            if (vscreen->caps.caps.v2.host_feature_check_version >= 19)
               return vscreen->caps.caps.v2.max_tes_outputs;
            FALLTHROUGH;
         default:
            return vscreen->caps.caps.v2.max_vertex_outputs;
         }
  //    case PIPE_SHADER_CAP_MAX_CONSTS:
  //       return 4096;
      case PIPE_SHADER_CAP_MAX_TEMPS:
         return 256;
      case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
         return MIN2(vscreen->caps.caps.v1.max_uniform_blocks, PIPE_MAX_CONSTANT_BUFFERS);
//      case PIPE_SHADER_CAP_MAX_ADDRS:
//         return 1;
      case PIPE_SHADER_CAP_SUBROUTINES:
         return 1;
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
         return MIN2(vscreen->caps.caps.v2.max_texture_samplers, PIPE_MAX_SAMPLERS);
      case PIPE_SHADER_CAP_INTEGERS:
         return vscreen->caps.caps.v1.glsl_level >= 130;
      case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
         return 32;
      case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
         /* Older hosts don't report a per-stage limit; use the classic
          * GL minimum of 4096 vec4s. */
         if (vscreen->caps.caps.v2.host_feature_check_version < 12)
            return 4096 * sizeof(float[4]);
         return VIRGL_SHADER_STAGE_CAP_V2(max_const_buffer_size, shader);
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS: {
         /* INT_MAX is the "not reported" sentinel for the per-stage value;
          * fall back to the coarse frag/compute vs. other-stages split. */
         int max_shader_buffers = VIRGL_SHADER_STAGE_CAP_V2(max_shader_storage_blocks, shader);
         if (max_shader_buffers != INT_MAX) {
            return max_shader_buffers;
         } else if (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) {
            return vscreen->caps.caps.v2.max_shader_buffer_frag_compute;
         } else {
            return vscreen->caps.caps.v2.max_shader_buffer_other_stages;
         }
      }
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
         if (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE)
            return vscreen->caps.caps.v2.max_shader_image_frag_compute;
         else
            return vscreen->caps.caps.v2.max_shader_image_other_stages;
      case PIPE_SHADER_CAP_SUPPORTED_IRS:
         return (1 << PIPE_SHADER_IR_TGSI) | (1 << PIPE_SHADER_IR_NIR);
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
         return VIRGL_SHADER_STAGE_CAP_V2(max_atomic_counters, shader);
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
         return VIRGL_SHADER_STAGE_CAP_V2(max_atomic_counter_buffers, shader);
      case PIPE_SHADER_CAP_INT64_ATOMICS:
      case PIPE_SHADER_CAP_FP16:
      case PIPE_SHADER_CAP_FP16_DERIVATIVES:
      case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
      case PIPE_SHADER_CAP_INT16:
      case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
         /* None of these are supported by the virgl protocol. */
         return 0;
      default:
         return 0;
      }
   default:
      return 0;
   }
}
189
/**
 * pipe_screen::get_video_param callback.
 *
 * First narrows to the profiles/entrypoints this driver can handle at
 * all, then looks for a matching host-reported video cap entry; a NULL
 * vcaps means the (profile, entrypoint) pair is unsupported and each
 * query falls back to a conservative default.
 */
static int
virgl_get_video_param(struct pipe_screen *screen,
                      enum pipe_video_profile profile,
                      enum pipe_video_entrypoint entrypoint,
                      enum pipe_video_cap param)
{
   unsigned i;
   bool drv_supported;
   struct virgl_video_caps *vcaps = NULL;
   struct virgl_screen *vscreen;

   if (!screen)
      return 0;

   vscreen = virgl_screen(screen);
   /* Sanity-check the host-provided count against the fixed array size. */
   if (vscreen->caps.caps.v2.num_video_caps > ARRAY_SIZE(vscreen->caps.caps.v2.video_caps))
      return 0;

   /* Profiles and entrypoints supported by the driver */
   switch (u_reduce_video_profile(profile)) {
      case PIPE_VIDEO_FORMAT_MPEG4_AVC: /* fall through */
      case PIPE_VIDEO_FORMAT_HEVC:
         /* H.264/H.265 support both decode and encode. */
         drv_supported = (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM ||
                          entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE);
         break;
      case PIPE_VIDEO_FORMAT_MPEG12:
      case PIPE_VIDEO_FORMAT_VC1:
      case PIPE_VIDEO_FORMAT_JPEG:
      case PIPE_VIDEO_FORMAT_VP9:
      case PIPE_VIDEO_FORMAT_AV1:
         /* These codecs are decode-only. */
         drv_supported = (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM);
         break;
      default:
         drv_supported = false;
         break;
   }

   if (drv_supported) {
      /* Check if the device supports it, vcaps is NULL means not supported */
      for (i = 0;  i < vscreen->caps.caps.v2.num_video_caps; i++) {
          if (vscreen->caps.caps.v2.video_caps[i].profile == profile &&
              vscreen->caps.caps.v2.video_caps[i].entrypoint == entrypoint) {
              vcaps = &vscreen->caps.caps.v2.video_caps[i];
              break;
          }
      }
   }

   /*
    * Since there are calls like this:
    *   pot_buffers = !pipe->screen->get_video_param
    *   (
    *      pipe->screen,
    *      PIPE_VIDEO_PROFILE_UNKNOWN,
    *      PIPE_VIDEO_ENTRYPOINT_UNKNOWN,
    *      PIPE_VIDEO_CAP_NPOT_TEXTURES
    *   );
    * All parameters need to check the vcaps.
    */
   switch (param) {
      case PIPE_VIDEO_CAP_SUPPORTED:
         return vcaps != NULL;
      case PIPE_VIDEO_CAP_NPOT_TEXTURES:
         return vcaps ? vcaps->npot_texture : true;
      case PIPE_VIDEO_CAP_MAX_WIDTH:
         return vcaps ? vcaps->max_width : 0;
      case PIPE_VIDEO_CAP_MAX_HEIGHT:
         return vcaps ? vcaps->max_height : 0;
      case PIPE_VIDEO_CAP_PREFERED_FORMAT:
         return vcaps ? virgl_to_pipe_format(vcaps->prefered_format) : PIPE_FORMAT_NV12;
      case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
         return vcaps ? vcaps->prefers_interlaced : false;
      case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
         return vcaps ? vcaps->supports_interlaced : false;
      case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
         return vcaps ? vcaps->supports_progressive : true;
      case PIPE_VIDEO_CAP_MAX_LEVEL:
         return vcaps ? vcaps->max_level : 0;
      case PIPE_VIDEO_CAP_STACKED_FRAMES:
         return vcaps ? vcaps->stacked_frames : 0;
      case PIPE_VIDEO_CAP_MAX_MACROBLOCKS:
         return vcaps ? vcaps->max_macroblocks : 0;
      case PIPE_VIDEO_CAP_MAX_TEMPORAL_LAYERS:
         return vcaps ? vcaps->max_temporal_layers : 0;
      default:
         return 0;
   }
}
278
279 static int
virgl_get_compute_param(struct pipe_screen * screen,enum pipe_shader_ir ir_type,enum pipe_compute_cap param,void * ret)280 virgl_get_compute_param(struct pipe_screen *screen,
281 enum pipe_shader_ir ir_type,
282 enum pipe_compute_cap param,
283 void *ret)
284 {
285 struct virgl_screen *vscreen = virgl_screen(screen);
286 if (!(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_COMPUTE_SHADER))
287 return 0;
288 switch (param) {
289 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
290 if (ret) {
291 uint64_t *grid_size = ret;
292 grid_size[0] = vscreen->caps.caps.v2.max_compute_grid_size[0];
293 grid_size[1] = vscreen->caps.caps.v2.max_compute_grid_size[1];
294 grid_size[2] = vscreen->caps.caps.v2.max_compute_grid_size[2];
295 }
296 return 3 * sizeof(uint64_t) ;
297 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
298 if (ret) {
299 uint64_t *block_size = ret;
300 block_size[0] = vscreen->caps.caps.v2.max_compute_block_size[0];
301 block_size[1] = vscreen->caps.caps.v2.max_compute_block_size[1];
302 block_size[2] = vscreen->caps.caps.v2.max_compute_block_size[2];
303 }
304 return 3 * sizeof(uint64_t);
305 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
306 if (ret) {
307 uint64_t *max_threads_per_block = ret;
308 *max_threads_per_block = vscreen->caps.caps.v2.max_compute_work_group_invocations;
309 }
310 return sizeof(uint64_t);
311 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
312 if (ret) {
313 uint64_t *max_local_size = ret;
314 /* Value reported by the closed source driver. */
315 *max_local_size = vscreen->caps.caps.v2.max_compute_shared_memory_size;
316 }
317 return sizeof(uint64_t);
318 default:
319 break;
320 }
321 return 0;
322 }
323
/**
 * Translate the host-reported virgl capabilities (v1/v2 caps structs)
 * into the gallium pipe_caps table on the screen.
 *
 * Many entries are gated on host_feature_check_version: older hosts did
 * not report the cap, so a conservative or historical default is used
 * instead. Called once at screen creation.
 */
static void
virgl_init_screen_caps(struct virgl_screen *vscreen)
{
   struct pipe_caps *caps = (struct pipe_caps *)&vscreen->base.caps;

   /* Start from gallium's defaults, then override from host caps. */
   u_init_pipe_screen_caps(&vscreen->base, -1);

   caps->npot_textures = true;
   caps->fragment_shader_texture_lod = true;
   caps->fragment_shader_derivatives = true;
   caps->anisotropic_filter = vscreen->caps.caps.v2.max_anisotropy > 1.0;
   caps->max_render_targets = vscreen->caps.caps.v1.max_render_targets;
   caps->max_dual_source_render_targets =
      vscreen->caps.caps.v1.max_dual_source_render_targets;
   caps->occlusion_query = vscreen->caps.caps.v1.bset.occlusion_query;
   /* Newer hosts report mirror-clamp variants explicitly; older ones only
    * have the combined v1 bit, which is unreliable on GLES hosts. */
   caps->texture_mirror_clamp_to_edge =
      vscreen->caps.caps.v2.host_feature_check_version >= 20 ?
         vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_MIRROR_CLAMP_TO_EDGE :
         vscreen->caps.caps.v1.bset.mirror_clamp &&
            !(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES);
   caps->texture_mirror_clamp =
      vscreen->caps.caps.v2.host_feature_check_version >= 22 ?
         vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_MIRROR_CLAMP :
         vscreen->caps.caps.v1.bset.mirror_clamp &&
            !(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES);
   caps->texture_swizzle = true;
   /* Zero from the host means "not reported"; fall back to safe minimums. */
   caps->max_texture_2d_size = vscreen->caps.caps.v2.max_texture_2d_size ?
      vscreen->caps.caps.v2.max_texture_2d_size : 16384;
   caps->max_texture_3d_levels = vscreen->caps.caps.v2.max_texture_3d_size ?
      1 + util_logbase2(vscreen->caps.caps.v2.max_texture_3d_size) :
      9; /* 256 x 256 x 256 */
   caps->max_texture_cube_levels = vscreen->caps.caps.v2.max_texture_cube_size ?
      1 + util_logbase2(vscreen->caps.caps.v2.max_texture_cube_size) :
      13; /* 4K x 4K */
   caps->blend_equation_separate = true;
   caps->indep_blend_enable = vscreen->caps.caps.v1.bset.indep_blend_enable;
   caps->indep_blend_func = vscreen->caps.caps.v1.bset.indep_blend_func;
   caps->fs_coord_origin_upper_left = true;
   caps->fs_coord_pixel_center_half_integer = true;
   caps->fs_coord_pixel_center_integer = true;
   caps->fs_coord_origin_lower_left =
      vscreen->caps.caps.v1.bset.fragment_coord_conventions;
   caps->depth_clip_disable = vscreen->caps.caps.v1.bset.depth_clip_disable;
   caps->max_stream_output_buffers = vscreen->caps.caps.v1.max_streamout_buffers;
   caps->max_stream_output_separate_components =
      caps->max_stream_output_interleaved_components = 16*4;
   /* Quads are not part of the virgl protocol; they get lowered. */
   caps->supported_prim_modes =
      BITFIELD_MASK(MESA_PRIM_COUNT) &
      ~BITFIELD_BIT(MESA_PRIM_QUADS) &
      ~BITFIELD_BIT(MESA_PRIM_QUAD_STRIP);
   caps->primitive_restart =
      caps->primitive_restart_fixed_index = vscreen->caps.caps.v1.bset.primitive_restart;
   caps->shader_stencil_export = vscreen->caps.caps.v1.bset.shader_stencil_export;
   caps->vs_instanceid = true;
   caps->vertex_element_instance_divisor = true;
   caps->seamless_cube_map = vscreen->caps.caps.v1.bset.seamless_cube_map;
   caps->seamless_cube_map_per_texture =
      vscreen->caps.caps.v1.bset.seamless_cube_map_per_texture;
   caps->max_texture_array_layers = vscreen->caps.caps.v1.max_texture_array_layers;
   caps->min_texel_offset = vscreen->caps.caps.v2.min_texel_offset;
   caps->min_texture_gather_offset = vscreen->caps.caps.v2.min_texture_gather_offset;
   caps->max_texel_offset = vscreen->caps.caps.v2.max_texel_offset;
   caps->max_texture_gather_offset = vscreen->caps.caps.v2.max_texture_gather_offset;
   caps->conditional_render = vscreen->caps.caps.v1.bset.conditional_render;
   caps->texture_barrier =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER;
   caps->vertex_color_unclamped = true;
   caps->fragment_color_clamped =
      caps->vertex_color_clamped = vscreen->caps.caps.v1.bset.color_clamping;
   /* Hosts predating feature-check v1 were assumed to support this. */
   caps->mixed_colorbuffer_formats =
      (vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_FBO_MIXED_COLOR_FORMATS) ||
      (vscreen->caps.caps.v2.host_feature_check_version < 1);
   caps->glsl_feature_level_compatibility =
      vscreen->caps.caps.v2.host_feature_check_version < 6 ?
         MIN2(vscreen->caps.caps.v1.glsl_level, 140) :
         vscreen->caps.caps.v1.glsl_level;
   caps->glsl_feature_level = vscreen->caps.caps.v1.glsl_level;
   caps->quads_follow_provoking_vertex_convention = true;
   caps->depth_clip_disable_separate = false;
   caps->compute = vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_COMPUTE_SHADER;
   caps->user_vertex_buffers = false;
   caps->constant_buffer_offset_alignment =
      vscreen->caps.caps.v2.uniform_buffer_offset_alignment;
   caps->stream_output_pause_resume =
      caps->stream_output_interleave_buffers =
      vscreen->caps.caps.v1.bset.streamout_pause_resume;
   caps->start_instance = vscreen->caps.caps.v1.bset.start_instance;
   caps->tgsi_can_compact_constants = false;
   caps->texture_transfer_modes = false;
   caps->nir_images_as_deref = false;
   caps->query_timestamp =
      caps->query_time_elapsed =
      vscreen->caps.caps.v2.host_feature_check_version >= 15 ?
         vscreen->caps.caps.v1.bset.timer_query :
         true; /* older versions had this always enabled */
   caps->tgsi_texcoord = vscreen->caps.caps.v2.host_feature_check_version >= 10;
   caps->min_map_buffer_alignment = VIRGL_MAP_BUFFER_ALIGNMENT;
   caps->texture_buffer_objects = vscreen->caps.caps.v1.max_tbo_size > 0;
   caps->texture_buffer_offset_alignment =
      vscreen->caps.caps.v2.texture_buffer_offset_alignment;
   caps->buffer_sampler_view_rgba_only = false;
   caps->cube_map_array = vscreen->caps.caps.v1.bset.cube_map_array;
   caps->texture_multisample = vscreen->caps.caps.v1.bset.texture_multisample;
   caps->max_viewports = vscreen->caps.caps.v1.max_viewports;
   caps->max_texel_buffer_elements = vscreen->caps.caps.v1.max_tbo_size;
   caps->texture_border_color_quirk = 0;
   caps->endianness = PIPE_ENDIAN_LITTLE;
   caps->query_pipeline_statistics =
      !!(vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_PIPELINE_STATISTICS_QUERY);
   caps->mixed_framebuffer_sizes = true;
   caps->mixed_color_depth_bits = true;
   /* Both layer and viewport-index writes from the VS are required. */
   caps->vs_layer_viewport =
      (vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_VS_VERTEX_LAYER) &&
      (vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_VS_VIEWPORT_INDEX);
   caps->max_geometry_output_vertices = vscreen->caps.caps.v2.max_geom_output_vertices;
   caps->max_geometry_total_output_components =
      vscreen->caps.caps.v2.max_geom_total_output_components;
   caps->texture_query_lod = vscreen->caps.caps.v1.bset.texture_query_lod;
   caps->max_texture_gather_components =
      vscreen->caps.caps.v1.max_texture_gather_components;
   caps->draw_indirect = vscreen->caps.caps.v1.bset.has_indirect_draw;
   caps->sample_shading =
      caps->force_persample_interp = vscreen->caps.caps.v1.bset.has_sample_shading;
   caps->cull_distance = vscreen->caps.caps.v1.bset.has_cull;
   caps->max_vertex_streams =
      ((vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFORM_FEEDBACK3) ||
       (vscreen->caps.caps.v2.host_feature_check_version < 2)) ? 4 : 1;
   caps->conditional_render_inverted =
      vscreen->caps.caps.v1.bset.conditional_render_inverted;
   caps->fs_fine_derivative = vscreen->caps.caps.v1.bset.derivative_control;
   caps->polygon_offset_clamp = vscreen->caps.caps.v1.bset.polygon_offset_clamp;
   caps->query_so_overflow =
      vscreen->caps.caps.v1.bset.transform_feedback_overflow_query;
   caps->shader_buffer_offset_alignment =
      vscreen->caps.caps.v2.shader_buffer_offset_alignment;
   caps->doubles =
      vscreen->caps.caps.v1.bset.has_fp64 ||
      (vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES);
   caps->max_shader_patch_varyings = vscreen->caps.caps.v2.max_shader_patch_varyings;
   caps->sampler_view_target =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW;
   caps->max_vertex_attrib_stride = vscreen->caps.caps.v2.max_vertex_attrib_stride;
   caps->copy_between_compressed_and_plain_formats =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_IMAGE;
   caps->texture_query_samples = vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TXQS;
   caps->framebuffer_no_attachment =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH;
   caps->robust_buffer_access_behavior =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_ROBUST_BUFFER_ACCESS;
   caps->fbfetch =
      (vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TGSI_FBFETCH) ? 1 : 0;
   caps->blend_equation_advanced =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION;
   caps->shader_clock = vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_SHADER_CLOCK;
   caps->shader_array_components =
      vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TGSI_COMPONENTS;
   caps->max_combined_shader_buffers = vscreen->caps.caps.v2.max_combined_shader_buffers;
   caps->max_combined_hw_atomic_counters =
      vscreen->caps.caps.v2.max_combined_atomic_counters;
   caps->max_combined_hw_atomic_counter_buffers =
      vscreen->caps.caps.v2.max_combined_atomic_counter_buffers;
   caps->texture_float_linear = true;
   caps->texture_half_float_linear = true; /* TODO: need to introduce a hw-cap for this */
   caps->query_buffer_object = vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_QBO;
   caps->max_varyings = vscreen->caps.caps.v1.glsl_level < 150 ?
      vscreen->caps.caps.v2.max_vertex_attribs : 32;
   /* If the host supports only one sample (e.g., if it is using softpipe),
    * fake multisampling to able to advertise higher GL versions. */
   caps->fake_sw_msaa = vscreen->caps.caps.v1.max_samples == 1;
   caps->multi_draw_indirect =
      !!(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_MULTI_DRAW_INDIRECT);
   caps->multi_draw_indirect_params =
      !!(vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_INDIRECT_PARAMS);
   caps->buffer_map_persistent_coherent =
      (vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_ARB_BUFFER_STORAGE) &&
      (vscreen->caps.caps.v2.host_feature_check_version >= 4) &&
      vscreen->vws->supports_coherent && !vscreen->no_coherent;
   caps->pci_group =
      caps->pci_bus =
      caps->pci_device =
      caps->pci_function = 0;
   caps->allow_mapped_buffers_during_execution = 0;
   caps->clip_halfz = vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_CLIP_HALFZ;
   caps->max_gs_invocations = 32;
   caps->max_shader_buffer_size = 1 << 27;
   /* virtio-gpu PCI IDs. */
   caps->vendor_id = 0x1af4;
   caps->device_id = 0x1010;
   caps->video_memory =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_VIDEO_MEMORY ?
         vscreen->caps.caps.v2.max_video_memory : 0;
   caps->uma = !!caps->video_memory;
   caps->texture_shadow_lod =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_TEXTURE_SHADOW_LOD;
   caps->native_fence_fd = vscreen->vws->supports_fences;
   caps->dest_surface_srgb_control =
      (vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_SRGB_WRITE_CONTROL) ||
      (vscreen->caps.caps.v2.host_feature_check_version < 1);
   /* Shader creation emits the shader through the context's command buffer
    * in virgl_encode_shader_state().
    */
   caps->shareable_shaders = false;
   caps->query_memory_info =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_MEMINFO;
   caps->string_marker =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_STRING_MARKER;
   caps->surface_sample_count =
      vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA;
   caps->draw_parameters =
      !!(vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_DRAW_PARAMETERS);
   caps->shader_group_vote =
      !!(vscreen->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_GROUP_VOTE);
   caps->image_store_formatted = true;
   caps->gl_spirv = true;

   if (vscreen->caps.caps.v2.host_feature_check_version >= 13)
      caps->max_constant_buffer_size = vscreen->caps.caps.v2.max_uniform_block_size;

   /* Point/line limits come straight from the host GL implementation. */
   caps->min_line_width =
   caps->min_line_width_aa =
   caps->min_point_size =
   caps->min_point_size_aa = 1;
   caps->point_size_granularity =
   caps->line_width_granularity = 0.1;
   caps->max_line_width = vscreen->caps.caps.v2.max_aliased_line_width;
   caps->max_line_width_aa = vscreen->caps.caps.v2.max_smooth_line_width;
   caps->max_point_size = vscreen->caps.caps.v2.max_aliased_point_size;
   caps->max_point_size_aa = vscreen->caps.caps.v2.max_smooth_point_size;
   caps->max_texture_anisotropy = vscreen->caps.caps.v2.max_anisotropy;
   caps->max_texture_lod_bias = vscreen->caps.caps.v2.max_texture_lod_bias;
}
554
555 static bool
has_format_bit(struct virgl_supported_format_mask * mask,enum virgl_formats fmt)556 has_format_bit(struct virgl_supported_format_mask *mask,
557 enum virgl_formats fmt)
558 {
559 assert(fmt < VIRGL_FORMAT_MAX);
560 unsigned val = (unsigned)fmt;
561 unsigned idx = val / 32;
562 unsigned bit = val % 32;
563 assert(idx < ARRAY_SIZE(mask->bitmask));
564 return (mask->bitmask[idx] & (1u << bit)) != 0;
565 }
566
567 bool
virgl_has_readback_format(struct pipe_screen * screen,enum virgl_formats fmt,bool allow_tweak)568 virgl_has_readback_format(struct pipe_screen *screen,
569 enum virgl_formats fmt, bool allow_tweak)
570 {
571 struct virgl_screen *vscreen = virgl_screen(screen);
572 if (has_format_bit(&vscreen->caps.caps.v2.supported_readback_formats,
573 fmt))
574 return true;
575
576 if (allow_tweak && fmt == VIRGL_FORMAT_L8_SRGB && vscreen->tweak_l8_srgb_readback) {
577 return true;
578 }
579
580 return false;
581 }
582
583 static bool
virgl_is_vertex_format_supported(struct pipe_screen * screen,enum pipe_format format)584 virgl_is_vertex_format_supported(struct pipe_screen *screen,
585 enum pipe_format format)
586 {
587 struct virgl_screen *vscreen = virgl_screen(screen);
588 const struct util_format_description *format_desc;
589 int i;
590
591 format_desc = util_format_description(format);
592
593 if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
594 int vformat = VIRGL_FORMAT_R11G11B10_FLOAT;
595 int big = vformat / 32;
596 int small = vformat % 32;
597 if (!(vscreen->caps.caps.v1.vertexbuffer.bitmask[big] & (1 << small)))
598 return false;
599 return true;
600 }
601
602 i = util_format_get_first_non_void_channel(format);
603 if (i == -1)
604 return false;
605
606 if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
607 return false;
608
609 if (format_desc->channel[i].type == UTIL_FORMAT_TYPE_FIXED)
610 return false;
611 return true;
612 }
613
614 static bool
virgl_format_check_bitmask(enum pipe_format format,uint32_t bitmask[16],bool may_emulate_bgra)615 virgl_format_check_bitmask(enum pipe_format format,
616 uint32_t bitmask[16],
617 bool may_emulate_bgra)
618 {
619 enum virgl_formats vformat = pipe_to_virgl_format(format);
620 int big = vformat / 32;
621 int small = vformat % 32;
622 if ((bitmask[big] & (1u << small)))
623 return true;
624
625 /* On GLES hosts we don't advertise BGRx_SRGB, but we may be able
626 * emulate it by using a swizzled RGBx */
627 if (may_emulate_bgra) {
628 if (format == PIPE_FORMAT_B8G8R8A8_SRGB)
629 format = PIPE_FORMAT_R8G8B8A8_SRGB;
630 else if (format == PIPE_FORMAT_B8G8R8X8_SRGB)
631 format = PIPE_FORMAT_R8G8B8X8_SRGB;
632 else {
633 return false;
634 }
635
636 vformat = pipe_to_virgl_format(format);
637 big = vformat / 32;
638 small = vformat % 32;
639 if (bitmask[big] & (1 << small))
640 return true;
641 }
642 return false;
643 }
644
virgl_has_scanout_format(struct virgl_screen * vscreen,enum pipe_format format,bool may_emulate_bgra)645 bool virgl_has_scanout_format(struct virgl_screen *vscreen,
646 enum pipe_format format,
647 bool may_emulate_bgra)
648 {
649 return virgl_format_check_bitmask(format,
650 vscreen->caps.caps.v2.scanout.bitmask,
651 may_emulate_bgra);
652 }
653
654 /**
655 * Query format support for creating a texture, drawing surface, etc.
656 * \param format the format to test
657 * \param type one of PIPE_TEXTURE, PIPE_SURFACE
658 */
static bool
virgl_is_format_supported( struct pipe_screen *screen,
                                 enum pipe_format format,
                                 enum pipe_texture_target target,
                                 unsigned sample_count,
                                 unsigned storage_sample_count,
                                 unsigned bind)
{
   struct virgl_screen *vscreen = virgl_screen(screen);
   const struct util_format_description *format_desc;
   int i;

   union virgl_caps *caps = &vscreen->caps.caps;
   /* BGRA emulation only kicks in when the host supports app tweaks and
    * the tweak is enabled. */
   bool may_emulate_bgra = (caps->v2.capability_bits &
                            VIRGL_CAP_APP_TWEAK_SUPPORT) &&
                           vscreen->tweak_gles_emulate_bgra;

   /* Split storage sample counts are not supported. */
   if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
      return false;

   if (!util_is_power_of_two_or_zero(sample_count))
      return false;

   assert(target == PIPE_BUFFER ||
          target == PIPE_TEXTURE_1D ||
          target == PIPE_TEXTURE_1D_ARRAY ||
          target == PIPE_TEXTURE_2D ||
          target == PIPE_TEXTURE_2D_ARRAY ||
          target == PIPE_TEXTURE_RECT ||
          target == PIPE_TEXTURE_3D ||
          target == PIPE_TEXTURE_CUBE ||
          target == PIPE_TEXTURE_CUBE_ARRAY);

   format_desc = util_format_description(format);

   if (util_format_is_intensity(format))
      return false;

   /* Multisample checks: host must support MSAA at this sample count,
    * and (on newer hosts) for this specific format. */
   if (sample_count > 1) {
      if (!caps->v1.bset.texture_multisample)
         return false;

      if (bind & PIPE_BIND_SHADER_IMAGE) {
         if (sample_count > caps->v2.max_image_samples)
            return false;
      }

      if (sample_count > caps->v1.max_samples)
         return false;

      if (caps->v2.host_feature_check_version >= 9 &&
          !has_format_bit(&caps->v2.supported_multisample_formats,
                          pipe_to_virgl_format(format)))
         return false;
   }

   if (bind & PIPE_BIND_VERTEX_BUFFER) {
      return virgl_is_vertex_format_supported(screen, format);
   }

   if (util_format_is_compressed(format) && target == PIPE_BUFFER)
      return false;

   /* Allow 3-comp 32 bit textures only for TBOs (needed for ARB_tbo_rgb32) */
   if ((format == PIPE_FORMAT_R32G32B32_FLOAT ||
       format == PIPE_FORMAT_R32G32B32_SINT ||
       format == PIPE_FORMAT_R32G32B32_UINT) &&
       target != PIPE_BUFFER)
      return false;

   /* Compressed 3D textures are not supported. */
   if ((format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC ||
        format_desc->layout == UTIL_FORMAT_LAYOUT_ETC ||
        format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) &&
       target == PIPE_TEXTURE_3D)
      return false;


   if (bind & PIPE_BIND_RENDER_TARGET) {
      /* For ARB_framebuffer_no_attachments. */
      if (format == PIPE_FORMAT_NONE)
         return true;

      if (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS)
         return false;

      /*
       * Although possible, it is unnatural to render into compressed or YUV
       * surfaces. So disable these here to avoid going into weird paths
       * inside gallium frontends.
       */
      if (format_desc->block.width != 1 ||
          format_desc->block.height != 1)
         return false;

      if (!virgl_format_check_bitmask(format,
                                      caps->v1.render.bitmask,
                                      may_emulate_bgra))
         return false;
   }

   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS)
         return false;
   }

   if (bind & PIPE_BIND_SCANOUT) {
      /* No BGRA emulation for scanout — the display needs the real layout. */
      if (!virgl_format_check_bitmask(format, caps->v2.scanout.bitmask, false))
         return false;
   }

   /*
    * All other operations (sampling, transfer, etc).
    */

   /* Compressed and packed-float formats skip the channel sanity checks
    * below and go straight to the host sampler bitmask lookup. */
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
      goto out_lookup;
   }
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
      goto out_lookup;
   }
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
      goto out_lookup;
   }
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_ETC) {
      goto out_lookup;
   }

   if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
      goto out_lookup;
   } else if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
      goto out_lookup;
   }

   if (format_desc->layout == UTIL_FORMAT_LAYOUT_ASTC) {
      goto out_lookup;
   }

   i = util_format_get_first_non_void_channel(format);
   if (i == -1)
      return false;

   /* no L4A4 */
   if (format_desc->nr_channels < 4 && format_desc->channel[i].size == 4)
      return false;

 out_lookup:
   /* Finally check the host's sampler-format bitmask. */
   return virgl_format_check_bitmask(format,
                                     caps->v1.sampler.bitmask,
                                     may_emulate_bgra);
}
809
/* pipe_screen::is_video_format_supported — delegate to the shared VL helper. */
static bool virgl_is_video_format_supported(struct pipe_screen *screen,
                                            enum pipe_format format,
                                            enum pipe_video_profile profile,
                                            enum pipe_video_entrypoint entrypoint)
{
   return vl_video_buffer_is_format_supported(screen, format, profile,
                                              entrypoint);
}
817
818
virgl_flush_frontbuffer(struct pipe_screen * screen,struct pipe_context * ctx,struct pipe_resource * res,unsigned level,unsigned layer,void * winsys_drawable_handle,unsigned nboxes,struct pipe_box * sub_box)819 static void virgl_flush_frontbuffer(struct pipe_screen *screen,
820 struct pipe_context *ctx,
821 struct pipe_resource *res,
822 unsigned level, unsigned layer,
823 void *winsys_drawable_handle, unsigned nboxes, struct pipe_box *sub_box)
824 {
825 struct virgl_screen *vscreen = virgl_screen(screen);
826 struct virgl_winsys *vws = vscreen->vws;
827 struct virgl_resource *vres = virgl_resource(res);
828 struct virgl_context *vctx = virgl_context(ctx);
829
830 if (vws->flush_frontbuffer) {
831 virgl_flush_eq(vctx, vctx, NULL);
832 vws->flush_frontbuffer(vws, vctx->cbuf, vres->hw_res, level, layer, winsys_drawable_handle,
833 nboxes == 1 ? sub_box : NULL);
834 }
835 }
836
virgl_fence_reference(struct pipe_screen * screen,struct pipe_fence_handle ** ptr,struct pipe_fence_handle * fence)837 static void virgl_fence_reference(struct pipe_screen *screen,
838 struct pipe_fence_handle **ptr,
839 struct pipe_fence_handle *fence)
840 {
841 struct virgl_screen *vscreen = virgl_screen(screen);
842 struct virgl_winsys *vws = vscreen->vws;
843
844 vws->fence_reference(vws, ptr, fence);
845 }
846
virgl_fence_finish(struct pipe_screen * screen,struct pipe_context * ctx,struct pipe_fence_handle * fence,uint64_t timeout)847 static bool virgl_fence_finish(struct pipe_screen *screen,
848 struct pipe_context *ctx,
849 struct pipe_fence_handle *fence,
850 uint64_t timeout)
851 {
852 struct virgl_screen *vscreen = virgl_screen(screen);
853 struct virgl_winsys *vws = vscreen->vws;
854 struct virgl_context *vctx = virgl_context(ctx);
855
856 if (vctx && timeout)
857 virgl_flush_eq(vctx, NULL, NULL);
858
859 return vws->fence_wait(vws, fence, timeout);
860 }
861
virgl_fence_get_fd(struct pipe_screen * screen,struct pipe_fence_handle * fence)862 static int virgl_fence_get_fd(struct pipe_screen *screen,
863 struct pipe_fence_handle *fence)
864 {
865 struct virgl_screen *vscreen = virgl_screen(screen);
866 struct virgl_winsys *vws = vscreen->vws;
867
868 return vws->fence_get_fd(vws, fence);
869 }
870
871 static void
virgl_destroy_screen(struct pipe_screen * screen)872 virgl_destroy_screen(struct pipe_screen *screen)
873 {
874 struct virgl_screen *vscreen = virgl_screen(screen);
875 struct virgl_winsys *vws = vscreen->vws;
876
877 slab_destroy_parent(&vscreen->transfer_pool);
878
879 if (vws)
880 vws->destroy(vws);
881
882 disk_cache_destroy(vscreen->disk_cache);
883
884 FREE(vscreen);
885 }
886
887 static void
fixup_formats(union virgl_caps * caps,struct virgl_supported_format_mask * mask)888 fixup_formats(union virgl_caps *caps, struct virgl_supported_format_mask *mask)
889 {
890 const size_t size = ARRAY_SIZE(mask->bitmask);
891 for (int i = 0; i < size; ++i) {
892 if (mask->bitmask[i] != 0)
893 return; /* we got some formats, we definitely have a new protocol */
894 }
895
896 /* old protocol used; fall back to considering all sampleable formats valid
897 * readback-formats
898 */
899 for (int i = 0; i < size; ++i)
900 mask->bitmask[i] = caps->v1.sampler.bitmask[i];
901 }
902
virgl_query_memory_info(struct pipe_screen * screen,struct pipe_memory_info * info)903 static void virgl_query_memory_info(struct pipe_screen *screen, struct pipe_memory_info *info)
904 {
905 struct virgl_screen *vscreen = virgl_screen(screen);
906 struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
907 struct virgl_context *vctx = virgl_context(ctx);
908 struct virgl_resource *res;
909 struct virgl_memory_info virgl_info = {0};
910 const static struct pipe_resource templ = {
911 .target = PIPE_BUFFER,
912 .format = PIPE_FORMAT_R8_UNORM,
913 .bind = PIPE_BIND_CUSTOM,
914 .width0 = sizeof(struct virgl_memory_info),
915 .height0 = 1,
916 .depth0 = 1,
917 .array_size = 1,
918 .last_level = 0,
919 .nr_samples = 0,
920 .flags = 0
921 };
922
923 res = (struct virgl_resource*) screen->resource_create(screen, &templ);
924
925 virgl_encode_get_memory_info(vctx, res);
926 ctx->flush(ctx, NULL, 0);
927 vscreen->vws->resource_wait(vscreen->vws, res->hw_res);
928 pipe_buffer_read(ctx, &res->b, 0, sizeof(struct virgl_memory_info), &virgl_info);
929
930 info->avail_device_memory = virgl_info.avail_device_memory;
931 info->avail_staging_memory = virgl_info.avail_staging_memory;
932 info->device_memory_evicted = virgl_info.device_memory_evicted;
933 info->nr_device_memory_evictions = virgl_info.nr_device_memory_evictions;
934 info->total_device_memory = virgl_info.total_device_memory;
935 info->total_staging_memory = virgl_info.total_staging_memory;
936
937 screen->resource_destroy(screen, &res->b);
938 ctx->destroy(ctx);
939 }
940
virgl_get_disk_shader_cache(struct pipe_screen * pscreen)941 static struct disk_cache *virgl_get_disk_shader_cache (struct pipe_screen *pscreen)
942 {
943 struct virgl_screen *screen = virgl_screen(pscreen);
944
945 return screen->disk_cache;
946 }
947
virgl_disk_cache_create(struct virgl_screen * screen)948 static void virgl_disk_cache_create(struct virgl_screen *screen)
949 {
950 struct mesa_sha1 sha1_ctx;
951 _mesa_sha1_init(&sha1_ctx);
952
953 #ifdef HAVE_DL_ITERATE_PHDR
954 const struct build_id_note *note =
955 build_id_find_nhdr_for_addr(virgl_disk_cache_create);
956 assert(note);
957
958 unsigned build_id_len = build_id_length(note);
959 assert(build_id_len == 20); /* sha1 */
960
961 const uint8_t *id_sha1 = build_id_data(note);
962 assert(id_sha1);
963
964 _mesa_sha1_update(&sha1_ctx, id_sha1, build_id_len);
965 #endif
966
967 /* When we switch the host the caps might change and then we might have to
968 * apply different lowering. */
969 _mesa_sha1_update(&sha1_ctx, &screen->caps, sizeof(screen->caps));
970
971 uint8_t sha1[20];
972 _mesa_sha1_final(&sha1_ctx, sha1);
973 char timestamp[41];
974 _mesa_sha1_format(timestamp, sha1);
975
976 screen->disk_cache = disk_cache_create("virgl", timestamp, 0);
977 }
978
979 static bool
virgl_is_dmabuf_modifier_supported(UNUSED struct pipe_screen * pscreen,UNUSED uint64_t modifier,UNUSED enum pipe_format format,UNUSED bool * external_only)980 virgl_is_dmabuf_modifier_supported(UNUSED struct pipe_screen *pscreen,
981 UNUSED uint64_t modifier,
982 UNUSED enum pipe_format format,
983 UNUSED bool *external_only)
984 {
985 /* Always advertise support until virgl starts checking against host
986 * virglrenderer or consuming valid non-linear modifiers here.
987 */
988 return true;
989 }
990
991 static unsigned int
virgl_get_dmabuf_modifier_planes(UNUSED struct pipe_screen * pscreen,UNUSED uint64_t modifier,enum pipe_format format)992 virgl_get_dmabuf_modifier_planes(UNUSED struct pipe_screen *pscreen,
993 UNUSED uint64_t modifier,
994 enum pipe_format format)
995 {
996 /* Return the format plane count queried from pipe_format. For virgl,
997 * additional aux planes are entirely resolved on the host side.
998 */
999 return util_format_get_num_planes(format);
1000 }
1001
1002 static void
fixup_renderer(union virgl_caps * caps)1003 fixup_renderer(union virgl_caps *caps)
1004 {
1005 if (caps->v2.host_feature_check_version < 5)
1006 return;
1007
1008 char renderer[64];
1009 int renderer_len = snprintf(renderer, sizeof(renderer), "virgl (%s)",
1010 caps->v2.renderer);
1011 if (renderer_len >= 64) {
1012 memcpy(renderer + 59, "...)", 4);
1013 renderer_len = 63;
1014 }
1015 memcpy(caps->v2.renderer, renderer, renderer_len + 1);
1016 }
1017
1018 static const void *
virgl_get_compiler_options(struct pipe_screen * pscreen,enum pipe_shader_ir ir,enum pipe_shader_type shader)1019 virgl_get_compiler_options(struct pipe_screen *pscreen,
1020 enum pipe_shader_ir ir,
1021 enum pipe_shader_type shader)
1022 {
1023 struct virgl_screen *vscreen = virgl_screen(pscreen);
1024
1025 return &vscreen->compiler_options;
1026 }
1027
1028 static int
virgl_screen_get_fd(struct pipe_screen * pscreen)1029 virgl_screen_get_fd(struct pipe_screen *pscreen)
1030 {
1031 struct virgl_screen *vscreen = virgl_screen(pscreen);
1032 struct virgl_winsys *vws = vscreen->vws;
1033
1034 if (vws->get_fd)
1035 return vws->get_fd(vws);
1036 else
1037 return -1;
1038 }
1039
/* Create a virgl pipe_screen on top of the given winsys.
 *
 * Order matters here: driconf tweaks are parsed first, then masked by the
 * VIRGL_DEBUG environment flags; host caps are fetched and fixed up before
 * the screen caps and NIR compiler options are derived from them.
 *
 * Takes ownership of vws (released in virgl_destroy_screen).  Returns NULL
 * on allocation failure.
 */
struct pipe_screen *
virgl_create_screen(struct virgl_winsys *vws, const struct pipe_screen_config *config)
{
   struct virgl_screen *screen = CALLOC_STRUCT(virgl_screen);

   /* driconf option names queried below. */
   const char *VIRGL_GLES_EMULATE_BGRA = "gles_emulate_bgra";
   const char *VIRGL_GLES_APPLY_BGRA_DEST_SWIZZLE = "gles_apply_bgra_dest_swizzle";
   const char *VIRGL_GLES_SAMPLES_PASSED_VALUE = "gles_samples_passed_value";
   const char *VIRGL_FORMAT_L8_SRGB_ENABLE_READBACK = "format_l8_srgb_enable_readback";
   const char *VIRGL_SHADER_SYNC = "virgl_shader_sync";

   if (!screen)
      return NULL;

   virgl_debug = debug_get_option_virgl_debug();

   /* Pick up per-application tweaks from the driconf files. */
   if (config && config->options) {
      driParseConfigFiles(config->options, config->options_info, 0, "virtio_gpu",
                          NULL, NULL, NULL, 0, NULL, 0);

      screen->tweak_gles_emulate_bgra =
         driQueryOptionb(config->options, VIRGL_GLES_EMULATE_BGRA);
      screen->tweak_gles_apply_bgra_dest_swizzle =
         driQueryOptionb(config->options, VIRGL_GLES_APPLY_BGRA_DEST_SWIZZLE);
      screen->tweak_gles_tf3_value =
         driQueryOptioni(config->options, VIRGL_GLES_SAMPLES_PASSED_VALUE);
      screen->tweak_l8_srgb_readback =
         driQueryOptionb(config->options, VIRGL_FORMAT_L8_SRGB_ENABLE_READBACK);
      screen->shader_sync = driQueryOptionb(config->options, VIRGL_SHADER_SYNC);
   }
   /* VIRGL_DEBUG environment flags can force these tweaks on or off,
    * overriding the driconf values parsed above. */
   screen->tweak_gles_emulate_bgra &= !(virgl_debug & VIRGL_DEBUG_NO_EMULATE_BGRA);
   screen->tweak_gles_apply_bgra_dest_swizzle &= !(virgl_debug & VIRGL_DEBUG_NO_BGRA_DEST_SWIZZLE);
   screen->no_coherent = virgl_debug & VIRGL_DEBUG_NO_COHERENT;
   screen->tweak_l8_srgb_readback |= !!(virgl_debug & VIRGL_DEBUG_L8_SRGB_ENABLE_READBACK);
   screen->shader_sync |= !!(virgl_debug & VIRGL_DEBUG_SHADER_SYNC);

   /* Wire up the pipe_screen vtable. */
   screen->vws = vws;
   screen->base.get_name = virgl_get_name;
   screen->base.get_vendor = virgl_get_vendor;
   screen->base.get_screen_fd = virgl_screen_get_fd;
   screen->base.get_shader_param = virgl_get_shader_param;
   screen->base.get_video_param = virgl_get_video_param;
   screen->base.get_compute_param = virgl_get_compute_param;
   screen->base.get_compiler_options = virgl_get_compiler_options;
   screen->base.is_format_supported = virgl_is_format_supported;
   screen->base.is_video_format_supported = virgl_is_video_format_supported;
   screen->base.destroy = virgl_destroy_screen;
   screen->base.context_create = virgl_context_create;
   screen->base.flush_frontbuffer = virgl_flush_frontbuffer;
   screen->base.get_timestamp = u_default_get_timestamp;
   screen->base.fence_reference = virgl_fence_reference;
   //screen->base.fence_signalled = virgl_fence_signalled;
   screen->base.fence_finish = virgl_fence_finish;
   screen->base.fence_get_fd = virgl_fence_get_fd;
   screen->base.query_memory_info = virgl_query_memory_info;
   screen->base.get_disk_shader_cache = virgl_get_disk_shader_cache;
   screen->base.is_dmabuf_modifier_supported = virgl_is_dmabuf_modifier_supported;
   screen->base.get_dmabuf_modifier_planes = virgl_get_dmabuf_modifier_planes;

   virgl_init_screen_resource_functions(&screen->base);

   /* Fetch host caps and patch them up for old hosts before anything
    * below derives state from them. */
   vws->get_caps(vws, &screen->caps);
   fixup_formats(&screen->caps.caps,
                 &screen->caps.caps.v2.supported_readback_formats);
   fixup_formats(&screen->caps.caps, &screen->caps.caps.v2.scanout);
   fixup_renderer(&screen->caps.caps);

   union virgl_caps *caps = &screen->caps.caps;
   /* BGRA emulation is only needed when the host can't render BGRA natively. */
   screen->tweak_gles_emulate_bgra &= !virgl_format_check_bitmask(PIPE_FORMAT_B8G8R8A8_SRGB, caps->v1.render.bitmask, false);
   screen->refcnt = 1;

   virgl_init_screen_caps(screen);

   /* Set up the NIR shader compiler options now that we've figured out the caps. */
   screen->compiler_options = *(nir_shader_compiler_options *)
      nir_to_tgsi_get_compiler_options(&screen->base, PIPE_SHADER_IR_NIR, PIPE_SHADER_FRAGMENT);
   if (screen->base.caps.doubles) {
      /* virglrenderer is missing DFLR support, so avoid turning 64-bit
       * ffract+fsub back into ffloor.
       */
      screen->compiler_options.lower_ffloor = true;
      screen->compiler_options.lower_fneg = true;
   }
   screen->compiler_options.no_integers = screen->caps.caps.v1.glsl_level < 130;
   screen->compiler_options.lower_ffma32 = true;
   screen->compiler_options.fuse_ffma32 = false;
   screen->compiler_options.lower_ldexp = true;
   screen->compiler_options.lower_image_offset_to_range_base = true;
   screen->compiler_options.lower_atomic_offset_to_range_base = true;
   screen->compiler_options.support_indirect_outputs = (uint8_t)BITFIELD_MASK(PIPE_SHADER_TYPES);

   if (screen->caps.caps.v2.capability_bits & VIRGL_CAP_INDIRECT_INPUT_ADDR) {
      screen->compiler_options.support_indirect_inputs |= BITFIELD_BIT(MESA_SHADER_TESS_CTRL) |
                                                          BITFIELD_BIT(MESA_SHADER_TESS_EVAL) |
                                                          BITFIELD_BIT(MESA_SHADER_GEOMETRY) |
                                                          BITFIELD_BIT(MESA_SHADER_FRAGMENT);

      /* Vertex inputs can only be indirectly addressed on desktop GL hosts. */
      if (!(screen->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES))
         screen->compiler_options.support_indirect_inputs |= BITFIELD_BIT(MESA_SHADER_VERTEX);
   }

   slab_create_parent(&screen->transfer_pool, sizeof(struct virgl_transfer), 16);

   virgl_disk_cache_create(screen);
   return &screen->base;
}
1146