1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/accum.h"
30 #include "main/context.h"
31 #include "main/debug_output.h"
32 #include "main/framebuffer.h"
33 #include "main/glthread.h"
34 #include "main/shaderobj.h"
35 #include "main/state.h"
36 #include "main/version.h"
37 #include "main/hash.h"
38 #include "program/prog_cache.h"
39 #include "vbo/vbo.h"
40 #include "glapi/glapi.h"
41 #include "st_manager.h"
42 #include "st_context.h"
43 #include "st_debug.h"
44 #include "st_cb_bitmap.h"
45 #include "st_cb_clear.h"
46 #include "st_cb_drawpixels.h"
47 #include "st_cb_drawtex.h"
48 #include "st_cb_eglimage.h"
49 #include "st_cb_feedback.h"
50 #include "st_cb_flush.h"
51 #include "st_atom.h"
52 #include "st_draw.h"
53 #include "st_extensions.h"
54 #include "st_gen_mipmap.h"
55 #include "st_pbo.h"
56 #include "st_program.h"
57 #include "st_sampler_view.h"
58 #include "st_shader_cache.h"
59 #include "st_texcompress_compute.h"
60 #include "st_texture.h"
61 #include "st_util.h"
62 #include "pipe/p_context.h"
63 #include "util/u_cpu_detect.h"
64 #include "util/u_inlines.h"
65 #include "util/u_upload_mgr.h"
66 #include "util/u_vbuf.h"
67 #include "util/u_memory.h"
68 #include "util/hash_table.h"
69 #include "util/thread_sched.h"
70 #include "cso_cache/cso_context.h"
71 #include "compiler/glsl/glsl_parser_extras.h"
72 #include "nir.h"
73
/* Defines debug_get_option_mesa_mvp_dp4(): a cached boolean lookup of the
 * MESA_MVP_DP4 environment variable (default false). */
DEBUG_GET_ONCE_BOOL_OPTION(mesa_mvp_dp4, "MESA_MVP_DP4", false)
75
76 void
st_invalidate_buffers(struct st_context * st)77 st_invalidate_buffers(struct st_context *st)
78 {
79 st->ctx->NewDriverState |= ST_NEW_BLEND |
80 ST_NEW_DSA |
81 ST_NEW_FB_STATE |
82 ST_NEW_SAMPLE_STATE |
83 ST_NEW_SAMPLE_SHADING |
84 ST_NEW_FS_STATE |
85 ST_NEW_POLY_STIPPLE |
86 ST_NEW_VIEWPORT |
87 ST_NEW_RASTERIZER |
88 ST_NEW_SCISSOR |
89 ST_NEW_WINDOW_RECTANGLES;
90 }
91
92
93 static inline bool
st_vp_uses_current_values(const struct gl_context * ctx)94 st_vp_uses_current_values(const struct gl_context *ctx)
95 {
96 const uint64_t inputs = ctx->VertexProgram._Current->info.inputs_read;
97
98 return ~_mesa_get_enabled_vertex_arrays(ctx) & inputs;
99 }
100
101
/**
 * Translate Mesa's ctx->NewState dirty bits into gallium ST_NEW_* flags on
 * ctx->NewDriverState.  Called when core Mesa GL state has changed.
 */
void
st_invalidate_state(struct gl_context *ctx)
{
   GLbitfield new_state = ctx->NewState;
   struct st_context *st = st_context(ctx);

   if (new_state & _NEW_BUFFERS) {
      st_invalidate_buffers(st);
   } else {
      /* These set a subset of flags set by _NEW_BUFFERS, so we only have to
       * check them when _NEW_BUFFERS isn't set.
       */
      if (new_state & _NEW_FOG)
         ctx->NewDriverState |= ST_NEW_FS_STATE;
   }

   if (new_state & (_NEW_LIGHT_STATE |
                    _NEW_POINT))
      ctx->NewDriverState |= ST_NEW_RASTERIZER;

   /* Flat shading / two-sided color lowering happens in the FS, so lighting
    * state changes must also re-key the fragment shader on such drivers. */
   if ((new_state & _NEW_LIGHT_STATE) &&
       (st->lower_flatshade || st->lower_two_sided_color))
      ctx->NewDriverState |= ST_NEW_FS_STATE;

   /* User clip planes are transformed by the projection matrix. */
   if (new_state & _NEW_PROJECTION &&
       st_user_clip_planes_enabled(ctx))
      ctx->NewDriverState |= ST_NEW_CLIP_STATE;

   if (new_state & _NEW_PIXEL)
      ctx->NewDriverState |= ST_NEW_PIXEL_TRANSFER;

   if (new_state & _NEW_CURRENT_ATTRIB && st_vp_uses_current_values(ctx)) {
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
      /* glColor3f -> glColor4f changes the vertex format. */
      ctx->Array.NewVertexElements = true;
   }

   /* Update the vertex shader if ctx->Light._ClampVertexColor was changed. */
   if (st->clamp_vert_color_in_shader && (new_state & _NEW_LIGHT_STATE)) {
      ctx->NewDriverState |= ST_NEW_VS_STATE;
      /* GS/TES can also be the stage that writes clamped colors on
       * compat-profile GL 3.2+. */
      if (_mesa_is_desktop_gl_compat(st->ctx) && ctx->Version >= 32) {
         ctx->NewDriverState |= ST_NEW_GS_STATE | ST_NEW_TES_STATE;
      }
   }

   /* Update the vertex shader if ctx->Point was changed. */
   if (st->lower_point_size && new_state & _NEW_POINT) {
      /* Point size is written by the last pre-rasterization stage; dirty
       * only that stage (and its constants). */
      if (ctx->GeometryProgram._Current)
         ctx->NewDriverState |= ST_NEW_GS_STATE | ST_NEW_GS_CONSTANTS;
      else if (ctx->TessEvalProgram._Current)
         ctx->NewDriverState |= ST_NEW_TES_STATE | ST_NEW_TES_CONSTANTS;
      else
         ctx->NewDriverState |= ST_NEW_VS_STATE | ST_NEW_VS_CONSTANTS;
   }

   if (new_state & _NEW_TEXTURE_OBJECT) {
      /* Only dirty the texture-related states that are actually in use. */
      ctx->NewDriverState |= st->active_states &
                             (ST_NEW_SAMPLER_VIEWS |
                              ST_NEW_SAMPLERS |
                              ST_NEW_IMAGE_UNITS);
      if (ctx->FragmentProgram._Current) {
         struct gl_program *fp = ctx->FragmentProgram._Current;

         /* External samplers, ATI fragment shaders and fixed-function
          * shadow samplers bake texture state into the FS variant. */
         if (fp->ExternalSamplersUsed || fp->ati_fs ||
            (!fp->shader_program && fp->ShadowSamplers))
            ctx->NewDriverState |= ST_NEW_FS_STATE;
      }
   }
}
171
172
173 /*
174 * In some circumstances (such as running google-chrome) the state
175 * tracker may try to delete a resource view from a context different
176 * than when it was created. We don't want to do that.
177 *
178 * In that situation, st_texture_release_all_sampler_views() calls this
179 * function to transfer the sampler view reference to this context (expected
180 * to be the context which created the view.)
181 */
182 void
st_save_zombie_sampler_view(struct st_context * st,struct pipe_sampler_view * view)183 st_save_zombie_sampler_view(struct st_context *st,
184 struct pipe_sampler_view *view)
185 {
186 struct st_zombie_sampler_view_node *entry;
187
188 assert(view->context == st->pipe);
189
190 entry = MALLOC_STRUCT(st_zombie_sampler_view_node);
191 if (!entry)
192 return;
193
194 entry->view = view;
195
196 /* We need a mutex since this function may be called from one thread
197 * while free_zombie_resource_views() is called from another.
198 */
199 simple_mtx_lock(&st->zombie_sampler_views.mutex);
200 list_addtail(&entry->node, &st->zombie_sampler_views.list.node);
201 simple_mtx_unlock(&st->zombie_sampler_views.mutex);
202 }
203
204
205 /*
206 * Since OpenGL shaders may be shared among contexts, we can wind up
207 * with variants of a shader created with different contexts.
208 * When we go to destroy a gallium shader, we want to free it with the
209 * same context that it was created with, unless the driver reports
210 * pipe_caps.shareable_shaders = TRUE.
211 */
212 void
st_save_zombie_shader(struct st_context * st,enum pipe_shader_type type,struct pipe_shader_state * shader)213 st_save_zombie_shader(struct st_context *st,
214 enum pipe_shader_type type,
215 struct pipe_shader_state *shader)
216 {
217 struct st_zombie_shader_node *entry;
218
219 /* we shouldn't be here if the driver supports shareable shaders */
220 assert(!st->has_shareable_shaders);
221
222 entry = MALLOC_STRUCT(st_zombie_shader_node);
223 if (!entry)
224 return;
225
226 entry->shader = shader;
227 entry->type = type;
228
229 /* We need a mutex since this function may be called from one thread
230 * while free_zombie_shaders() is called from another.
231 */
232 simple_mtx_lock(&st->zombie_shaders.mutex);
233 list_addtail(&entry->node, &st->zombie_shaders.list.node);
234 simple_mtx_unlock(&st->zombie_shaders.mutex);
235 }
236
237
238 /*
239 * Free any zombie sampler views that may be attached to this context.
240 */
/*
 * Free any zombie sampler views that may be attached to this context.
 */
static void
free_zombie_sampler_views(struct st_context *st)
{
   struct st_zombie_sampler_view_node *entry, *next;

   /* NOTE(review): this emptiness check runs without holding the mutex —
    * presumably an intentional fast path (a concurrent
    * st_save_zombie_sampler_view() would just defer cleanup to a later
    * call).  Confirm this race is benign on all supported platforms. */
   if (list_is_empty(&st->zombie_sampler_views.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_sampler_views.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_sampler_views.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      /* Views must only be released by the context that created them. */
      assert(entry->view->context == st->pipe);
      pipe_sampler_view_reference(&entry->view, NULL);

      free(entry);
   }

   assert(list_is_empty(&st->zombie_sampler_views.list.node));

   simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}
266
267
268 /*
269 * Free any zombie shaders that may be attached to this context.
270 */
/*
 * Free any zombie shaders that may be attached to this context.
 */
static void
free_zombie_shaders(struct st_context *st)
{
   struct st_zombie_shader_node *entry, *next;

   /* NOTE(review): unlocked emptiness pre-check, same pattern as
    * free_zombie_sampler_views() — presumably a benign fast path. */
   if (list_is_empty(&st->zombie_shaders.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_shaders.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_shaders.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      /* Delete through the stage-specific hook and dirty that stage's
       * bound-shader state so the CSO rebinds on the next draw. */
      switch (entry->type) {
      case PIPE_SHADER_VERTEX:
         st->ctx->NewDriverState |= ST_NEW_VS_STATE;
         st->pipe->delete_vs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_FRAGMENT:
         st->ctx->NewDriverState |= ST_NEW_FS_STATE;
         st->pipe->delete_fs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_GEOMETRY:
         st->ctx->NewDriverState |= ST_NEW_GS_STATE;
         st->pipe->delete_gs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_CTRL:
         st->ctx->NewDriverState |= ST_NEW_TCS_STATE;
         st->pipe->delete_tcs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_EVAL:
         st->ctx->NewDriverState |= ST_NEW_TES_STATE;
         st->pipe->delete_tes_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_COMPUTE:
         st->ctx->NewDriverState |= ST_NEW_CS_STATE;
         st->pipe->delete_compute_state(st->pipe, entry->shader);
         break;
      default:
         unreachable("invalid shader type in free_zombie_shaders()");
      }
      free(entry);
   }

   assert(list_is_empty(&st->zombie_shaders.list.node));

   simple_mtx_unlock(&st->zombie_shaders.mutex);
}
321
322
323 /*
324 * This function is called periodically to free any zombie objects
325 * which are attached to this context.
326 */
/*
 * This function is called periodically to free any zombie objects
 * which are attached to this context: sampler views and shaders that
 * were handed off by other contexts for deletion here.
 */
void
st_context_free_zombie_objects(struct st_context *st)
{
   free_zombie_sampler_views(st);
   free_zombie_shaders(st);
}
333
334
/**
 * Tear down the state-tracker-private parts of the context and free 'st'
 * itself.  'destroy_pipe' controls whether the underlying pipe_context is
 * also destroyed (it is not on the partial-failure paths of
 * st_create_context_priv(), where the caller still owns the pipe).
 */
static void
st_destroy_context_priv(struct st_context *st, bool destroy_pipe)
{
   st_destroy_draw(st);
   st_destroy_clear(st);
   st_destroy_bitmap(st);
   st_destroy_drawpix(st);
   st_destroy_drawtex(st);
   st_destroy_pbo_helpers(st);

   if (_mesa_has_compute_shaders(st->ctx) && st->transcode_astc)
      st_destroy_texcompress_compute(st);

   st_destroy_bound_texture_handles(st);
   st_destroy_bound_image_handles(st);

   /* free glReadPixels cache data */
   st_invalidate_readpix_cache(st);
   util_throttle_deinit(st->screen, &st->throttle);

   /* The CSO context must go before the pipe context it wraps. */
   cso_destroy_context(st->cso_context);

   if (st->pipe && destroy_pipe)
      st->pipe->destroy(st->pipe);

   /* Unlink from the gl_context before freeing; 'st' is invalid after
    * FREE(). */
   st->ctx->st = NULL;
   FREE(st);
}
363
364
/**
 * Fill in ctx->DriverFlags: for each core-Mesa state group, the set of
 * ST_NEW_* bits that must be flagged dirty when that group changes.
 * Which bits are needed depends on the driver capabilities/lowerings
 * queried in st_create_context_priv().
 */
static void
st_init_driver_flags(struct st_context *st)
{
   struct gl_driver_flags *f = &st->ctx->DriverFlags;

   /* Shader resources */
   if (st->has_hw_atomics)
      f->NewAtomicBuffer = ST_NEW_HW_ATOMICS | ST_NEW_CS_ATOMICS;
   else
      f->NewAtomicBuffer = ST_NEW_ATOMIC_BUFFER;

   f->NewShaderConstants[MESA_SHADER_VERTEX] = ST_NEW_VS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_CTRL] = ST_NEW_TCS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_EVAL] = ST_NEW_TES_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_GEOMETRY] = ST_NEW_GS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_FRAGMENT] = ST_NEW_FS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_COMPUTE] = ST_NEW_CS_CONSTANTS;

   /* Alpha test is either lowered into the FS or handled by the DSA state. */
   if (st->lower_alpha_test)
      f->NewAlphaTest = ST_NEW_FS_STATE | ST_NEW_FS_CONSTANTS;
   else
      f->NewAlphaTest = ST_NEW_DSA;

   f->NewMultisampleEnable = ST_NEW_BLEND | ST_NEW_RASTERIZER |
                             ST_NEW_SAMPLE_STATE | ST_NEW_SAMPLE_SHADING;
   f->NewSampleShading = ST_NEW_SAMPLE_SHADING;

   /* This depends on what the gallium driver wants. */
   if (st->force_persample_in_shader) {
      f->NewMultisampleEnable |= ST_NEW_FS_STATE;
      f->NewSampleShading |= ST_NEW_FS_STATE;
   } else {
      f->NewSampleShading |= ST_NEW_RASTERIZER;
   }

   /* Fragment color clamping: in-shader lowering vs. rasterizer state. */
   if (st->clamp_frag_color_in_shader) {
      f->NewFragClamp = ST_NEW_FS_STATE;
   } else {
      f->NewFragClamp = ST_NEW_RASTERIZER;
   }

   f->NewClipPlaneEnable = ST_NEW_RASTERIZER;
   /* Lowered user clip planes live in the last geometry-processing stage. */
   if (st->lower_ucp)
      f->NewClipPlaneEnable |= ST_NEW_VS_STATE | ST_NEW_GS_STATE | ST_NEW_TES_STATE;

   /* GL_CLAMP emulation re-keys every shader stage that samples. */
   if (st->emulate_gl_clamp)
      f->NewSamplersWithClamp = ST_NEW_SAMPLERS |
                                ST_NEW_VS_STATE | ST_NEW_TCS_STATE |
                                ST_NEW_TES_STATE | ST_NEW_GS_STATE |
                                ST_NEW_FS_STATE | ST_NEW_CS_STATE;

   if (!st->has_hw_atomics && st->ctx->Const.ShaderStorageBufferOffsetAlignment > 4)
      f->NewAtomicBuffer |= ST_NEW_CONSTANTS;
}
419
420 static bool
st_have_perfquery(struct st_context * ctx)421 st_have_perfquery(struct st_context *ctx)
422 {
423 struct pipe_context *pipe = ctx->pipe;
424
425 return pipe->init_intel_perf_query_info && pipe->get_intel_perf_query_info &&
426 pipe->get_intel_perf_query_counter_info &&
427 pipe->new_intel_perf_query_obj && pipe->begin_intel_perf_query &&
428 pipe->end_intel_perf_query && pipe->delete_intel_perf_query &&
429 pipe->wait_intel_perf_query && pipe->is_intel_perf_query_ready &&
430 pipe->get_intel_perf_query_data;
431 }
432
433 static struct st_context *
st_create_context_priv(struct gl_context * ctx,struct pipe_context * pipe,const struct st_config_options * options)434 st_create_context_priv(struct gl_context *ctx, struct pipe_context *pipe,
435 const struct st_config_options *options)
436 {
437 struct pipe_screen *screen = pipe->screen;
438 struct st_context *st = CALLOC_STRUCT( st_context);
439
440 st->options = *options;
441
442 ctx->st_opts = &st->options;
443 ctx->st = st;
444
445 st->ctx = ctx;
446 st->screen = screen;
447 st->pipe = pipe;
448
449 st->can_bind_const_buffer_as_vertex =
450 screen->caps.can_bind_const_buffer_as_vertex;
451
452 /* st/mesa always uploads zero-stride vertex attribs, and other user
453 * vertex buffers are only possible with a compatibility profile.
454 * So tell the u_vbuf module that user VBOs are not possible with the Core
455 * profile, so that u_vbuf is bypassed completely if there is nothing else
456 * to do.
457 */
458 unsigned cso_flags;
459 switch (ctx->API) {
460 case API_OPENGL_CORE:
461 cso_flags = CSO_NO_USER_VERTEX_BUFFERS;
462 break;
463 case API_OPENGLES:
464 case API_OPENGLES2:
465 cso_flags = CSO_NO_64B_VERTEX_BUFFERS;
466 break;
467 default:
468 cso_flags = 0;
469 break;
470 }
471
472 st->cso_context = cso_create_context(pipe, cso_flags);
473 ctx->cso_context = st->cso_context;
474
475 STATIC_ASSERT(ARRAY_SIZE(st->update_functions) <= 64);
476
477 #define ST_STATE(FLAG, st_update) st->update_functions[FLAG##_INDEX] = st_update;
478 #include "st_atom_list.h"
479 #undef ST_STATE
480
481 st_init_clear(st);
482 {
483 enum pipe_texture_transfer_mode val = screen->caps.texture_transfer_modes;
484 st->prefer_blit_based_texture_transfer = (val & PIPE_TEXTURE_TRANSFER_BLIT) != 0;
485 st->allow_compute_based_texture_transfer = (val & PIPE_TEXTURE_TRANSFER_COMPUTE) != 0;
486 }
487 st_init_pbo_helpers(st);
488
489 /* Choose texture target for glDrawPixels, glBitmap, renderbuffers */
490 if (screen->caps.npot_textures)
491 st->internal_target = PIPE_TEXTURE_2D;
492 else
493 st->internal_target = PIPE_TEXTURE_RECT;
494
495 /* Setup vertex element info for 'struct st_util_vertex'.
496 */
497 {
498 STATIC_ASSERT(sizeof(struct st_util_vertex) == 9 * sizeof(float));
499
500 memset(&st->util_velems, 0, sizeof(st->util_velems));
501 st->util_velems.velems[0].src_offset = 0;
502 st->util_velems.velems[0].vertex_buffer_index = 0;
503 st->util_velems.velems[0].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
504 st->util_velems.velems[0].src_stride = sizeof(struct st_util_vertex);
505 st->util_velems.velems[1].src_offset = 3 * sizeof(float);
506 st->util_velems.velems[1].vertex_buffer_index = 0;
507 st->util_velems.velems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
508 st->util_velems.velems[1].src_stride = sizeof(struct st_util_vertex);
509 st->util_velems.velems[2].src_offset = 7 * sizeof(float);
510 st->util_velems.velems[2].vertex_buffer_index = 0;
511 st->util_velems.velems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
512 st->util_velems.velems[2].src_stride = sizeof(struct st_util_vertex);
513 }
514
515 ctx->Const.PackedDriverUniformStorage =
516 screen->caps.packed_uniforms;
517
518 ctx->Const.BitmapUsesRed =
519 screen->is_format_supported(screen, PIPE_FORMAT_R8_UNORM,
520 PIPE_TEXTURE_2D, 0, 0,
521 PIPE_BIND_SAMPLER_VIEW);
522
523 ctx->Const.QueryCounterBits.Timestamp =
524 screen->caps.query_timestamp_bits;
525
526 st->has_stencil_export =
527 screen->caps.shader_stencil_export;
528 st->has_etc1 = screen->is_format_supported(screen, PIPE_FORMAT_ETC1_RGB8,
529 PIPE_TEXTURE_2D, 0, 0,
530 PIPE_BIND_SAMPLER_VIEW);
531 st->has_etc2 = screen->is_format_supported(screen, PIPE_FORMAT_ETC2_RGB8,
532 PIPE_TEXTURE_2D, 0, 0,
533 PIPE_BIND_SAMPLER_VIEW);
534 st->transcode_etc = options->transcode_etc &&
535 screen->is_format_supported(screen, PIPE_FORMAT_DXT1_SRGBA,
536 PIPE_TEXTURE_2D, 0, 0,
537 PIPE_BIND_SAMPLER_VIEW);
538 st->transcode_astc = options->transcode_astc &&
539 screen->is_format_supported(screen, PIPE_FORMAT_DXT5_SRGBA,
540 PIPE_TEXTURE_2D, 0, 0,
541 PIPE_BIND_SAMPLER_VIEW) &&
542 screen->is_format_supported(screen, PIPE_FORMAT_DXT5_RGBA,
543 PIPE_TEXTURE_2D, 0, 0,
544 PIPE_BIND_SAMPLER_VIEW);
545 st->has_astc_2d_ldr =
546 screen->is_format_supported(screen, PIPE_FORMAT_ASTC_4x4_SRGB,
547 PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
548 st->has_astc_5x5_ldr =
549 screen->is_format_supported(screen, PIPE_FORMAT_ASTC_5x5_SRGB,
550 PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
551 st->astc_void_extents_need_denorm_flush =
552 screen->caps.astc_void_extents_need_denorm_flush;
553
554 st->has_s3tc = screen->is_format_supported(screen, PIPE_FORMAT_DXT5_RGBA,
555 PIPE_TEXTURE_2D, 0, 0,
556 PIPE_BIND_SAMPLER_VIEW);
557 st->has_rgtc = screen->is_format_supported(screen, PIPE_FORMAT_RGTC2_UNORM,
558 PIPE_TEXTURE_2D, 0, 0,
559 PIPE_BIND_SAMPLER_VIEW);
560 st->has_latc = screen->is_format_supported(screen, PIPE_FORMAT_LATC2_UNORM,
561 PIPE_TEXTURE_2D, 0, 0,
562 PIPE_BIND_SAMPLER_VIEW);
563 st->has_bptc = screen->is_format_supported(screen, PIPE_FORMAT_BPTC_SRGBA,
564 PIPE_TEXTURE_2D, 0, 0,
565 PIPE_BIND_SAMPLER_VIEW);
566 st->force_persample_in_shader =
567 screen->caps.sample_shading &&
568 !screen->caps.force_persample_interp;
569 st->has_shareable_shaders = screen->caps.shareable_shaders;
570 st->needs_texcoord_semantic =
571 screen->caps.tgsi_texcoord;
572 st->apply_texture_swizzle_to_border_color =
573 !!(screen->caps.texture_border_color_quirk &
574 (PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50 |
575 PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600));
576 st->use_format_with_border_color =
577 !!(screen->caps.texture_border_color_quirk &
578 PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO);
579 st->alpha_border_color_is_not_w =
580 !!(screen->caps.texture_border_color_quirk &
581 PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_ALPHA_NOT_W);
582 st->emulate_gl_clamp =
583 !screen->caps.gl_clamp;
584 st->has_time_elapsed =
585 screen->caps.query_time_elapsed;
586 ctx->Const.GLSLHasHalfFloatPacking =
587 screen->caps.shader_pack_half_float;
588 st->has_multi_draw_indirect =
589 screen->caps.multi_draw_indirect;
590 st->has_indirect_partial_stride =
591 screen->caps.multi_draw_indirect_partial_stride;
592 st->has_occlusion_query =
593 screen->caps.occlusion_query;
594 st->has_single_pipe_stat =
595 screen->caps.query_pipeline_statistics_single;
596 st->has_pipeline_stat =
597 screen->caps.query_pipeline_statistics;
598 st->has_indep_blend_enable =
599 screen->caps.indep_blend_enable;
600 st->has_indep_blend_func =
601 screen->caps.indep_blend_func;
602 st->can_dither =
603 screen->caps.dithering;
604 st->lower_flatshade =
605 !screen->caps.flatshade;
606 st->lower_alpha_test =
607 !screen->caps.alpha_test;
608 switch (screen->caps.point_size_fixed) {
609 case PIPE_POINT_SIZE_LOWER_ALWAYS:
610 st->lower_point_size = true;
611 st->add_point_size = true;
612 break;
613 case PIPE_POINT_SIZE_LOWER_USER_ONLY:
614 st->lower_point_size = true;
615 break;
616 default: break;
617 }
618 st->lower_two_sided_color =
619 !screen->caps.two_sided_color;
620 st->lower_ucp =
621 !screen->caps.clip_planes;
622 st->prefer_real_buffer_in_constbuf0 =
623 screen->caps.prefer_real_buffer_in_constbuf0;
624 st->has_conditional_render =
625 screen->caps.conditional_render;
626 st->lower_rect_tex =
627 !screen->caps.texrect;
628 st->allow_st_finalize_nir_twice =
629 screen->caps.call_finalize_nir_in_linker;
630
631 st->has_hw_atomics =
632 screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
633 PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS)
634 ? true : false;
635
636 st->validate_all_dirty_states =
637 screen->caps.validate_all_dirty_states
638 ? true : false;
639 st->can_null_texture =
640 screen->caps.null_textures
641 ? true : false;
642
643 util_throttle_init(&st->throttle,
644 screen->caps.max_texture_upload_memory_budget);
645
646 /* GL limits and extensions */
647 st_init_limits(screen, &ctx->Const, &ctx->Extensions, ctx->API);
648 st_init_extensions(screen, &ctx->Const,
649 &ctx->Extensions, &st->options, ctx->API);
650
651 if (st_have_perfquery(st)) {
652 ctx->Extensions.INTEL_performance_query = GL_TRUE;
653 }
654
655 /* Enable shader-based fallbacks for ARB_color_buffer_float if needed. */
656 if (screen->caps.vertex_color_unclamped) {
657 if (!screen->caps.vertex_color_clamped) {
658 st->clamp_vert_color_in_shader = GL_TRUE;
659 }
660
661 if (!screen->caps.fragment_color_clamped) {
662 st->clamp_frag_color_in_shader = GL_TRUE;
663 }
664
665 /* For drivers which cannot do color clamping, it's better to just
666 * disable ARB_color_buffer_float in the core profile, because
667 * the clamping is deprecated there anyway. */
668 if (_mesa_is_desktop_gl_core(ctx) &&
669 (st->clamp_frag_color_in_shader || st->clamp_vert_color_in_shader)) {
670 st->clamp_vert_color_in_shader = GL_FALSE;
671 st->clamp_frag_color_in_shader = GL_FALSE;
672 ctx->Extensions.ARB_color_buffer_float = GL_FALSE;
673 }
674 }
675
676 /* called after _mesa_create_context/_mesa_init_point, fix default user
677 * settable max point size up
678 */
679 ctx->Point.MaxSize = MAX2(ctx->Const.MaxPointSize,
680 ctx->Const.MaxPointSizeAA);
681
682 ctx->Const.NoClippingOnCopyTex = screen->caps.no_clip_on_copy_tex;
683
684 ctx->Const.ForceFloat32TexNearest =
685 !screen->caps.texture_float_linear;
686
687 ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].PositionAlwaysInvariant = options->vs_position_always_invariant;
688
689 ctx->Const.ShaderCompilerOptions[MESA_SHADER_TESS_EVAL].PositionAlwaysPrecise = options->vs_position_always_precise;
690
691 /* Set which shader types can be compiled at link time. */
692 st->shader_has_one_variant[MESA_SHADER_VERTEX] =
693 st->has_shareable_shaders &&
694 !st->clamp_vert_color_in_shader &&
695 !st->lower_point_size &&
696 !st->lower_ucp;
697
698 st->shader_has_one_variant[MESA_SHADER_FRAGMENT] =
699 st->has_shareable_shaders &&
700 !st->lower_flatshade &&
701 !st->lower_alpha_test &&
702 !st->clamp_frag_color_in_shader &&
703 !st->force_persample_in_shader &&
704 !st->lower_two_sided_color;
705
706 st->shader_has_one_variant[MESA_SHADER_TESS_CTRL] = st->has_shareable_shaders;
707 st->shader_has_one_variant[MESA_SHADER_TESS_EVAL] =
708 st->has_shareable_shaders &&
709 !st->clamp_vert_color_in_shader &&
710 !st->lower_point_size &&
711 !st->lower_ucp;
712
713 st->shader_has_one_variant[MESA_SHADER_GEOMETRY] =
714 st->has_shareable_shaders &&
715 !st->clamp_vert_color_in_shader &&
716 !st->lower_point_size &&
717 !st->lower_ucp;
718 st->shader_has_one_variant[MESA_SHADER_COMPUTE] = st->has_shareable_shaders;
719
720 if (!st->pipe->set_context_param || !util_thread_scheduler_enabled())
721 st->pin_thread_counter = ST_THREAD_SCHEDULER_DISABLED;
722
723 st->bitmap.cache.empty = true;
724
725 _mesa_override_extensions(ctx);
726 _mesa_compute_version(ctx);
727
728 if (ctx->Version == 0 ||
729 !_mesa_initialize_dispatch_tables(ctx)) {
730 /* This can happen when a core profile was requested, but the driver
731 * does not support some features of GL 3.1 or later.
732 */
733 st_destroy_context_priv(st, false);
734 return NULL;
735 }
736
737 if (_mesa_has_compute_shaders(ctx) &&
738 st->transcode_astc && !st_init_texcompress_compute(st)) {
739 /* Transcoding ASTC to DXT5 using compute shaders can provide a
740 * significant performance benefit over the CPU path. It isn't strictly
741 * necessary to fail if we can't use the compute shader path, but it's
742 * very convenient to do so. This should be rare.
743 */
744 st_destroy_context_priv(st, false);
745 return NULL;
746 }
747
748 /* This must be done after extensions are initialized to enable persistent
749 * mappings immediately.
750 */
751 _vbo_CreateContext(ctx);
752
753 st_init_driver_flags(st);
754 st_init_update_array(st);
755
756 /* Initialize context's winsys buffers list */
757 list_inithead(&st->winsys_buffers);
758
759 list_inithead(&st->zombie_sampler_views.list.node);
760 simple_mtx_init(&st->zombie_sampler_views.mutex, mtx_plain);
761 list_inithead(&st->zombie_shaders.list.node);
762 simple_mtx_init(&st->zombie_shaders.mutex, mtx_plain);
763
764 ctx->Const.DriverSupportedPrimMask = screen->caps.supported_prim_modes |
765 /* patches is always supported */
766 BITFIELD_BIT(MESA_PRIM_PATCHES);
767 st->active_states = _mesa_get_active_states(ctx);
768
769 return st;
770 }
771
/**
 * Forward a background (glthread worker) context registration to the
 * frontend screen, which must implement set_background_context.
 */
void
st_set_background_context(struct gl_context *ctx,
                          struct util_queue_monitoring *queue_info)
{
   struct st_context *st = ctx->st;
   struct pipe_frontend_screen *fscreen = st->frontend_screen;

   assert(fscreen->set_background_context);
   fscreen->set_background_context(st, queue_info);
}
782
/**
 * Fill in the dd_function_table hooks that core Mesa calls back into the
 * state tracker through.
 *
 * NOTE(review): 'has_egl_image_validate' is not referenced in this body —
 * presumably consumed by code outside this view or vestigial; confirm.
 */
static void
st_init_driver_functions(struct pipe_screen *screen,
                         struct dd_function_table *functions,
                         bool has_egl_image_validate)
{
   st_init_draw_functions(screen, functions);

   functions->NewProgram = _mesa_new_program;
   st_init_flush_functions(screen, functions);

   /* GL_ARB_get_program_binary */
   functions->ShaderCacheSerializeDriverBlob = st_serialise_nir_program;
   functions->ProgramBinarySerializeDriverBlob =
      st_serialise_nir_program_binary;
   functions->ProgramBinaryDeserializeDriverBlob =
      st_deserialise_nir_program;
}
800
801
/**
 * Public entry point: create a complete GL context on top of 'pipe'.
 * Allocates and initializes the core Mesa gl_context, then builds the
 * state-tracker context around it via st_create_context_priv().
 *
 * Returns NULL on failure; on the st_create_context_priv() failure path
 * the gl_context is freed here before returning.
 */
struct st_context *
st_create_context(gl_api api, struct pipe_context *pipe,
                  const struct gl_config *visual,
                  struct st_context *share,
                  const struct st_config_options *options,
                  bool no_error, bool has_egl_image_validate)
{
   struct gl_context *ctx;
   struct gl_context *shareCtx = share ? share->ctx : NULL;
   struct dd_function_table funcs;
   struct st_context *st;

   memset(&funcs, 0, sizeof(funcs));
   st_init_driver_functions(pipe->screen, &funcs, has_egl_image_validate);

   /* gl_context must be 16-byte aligned due to the alignment on GLmatrix. */
   ctx = align_malloc(sizeof(struct gl_context), 16);
   if (!ctx)
      return NULL;
   memset(ctx, 0, sizeof(*ctx));

   ctx->pipe = pipe;
   ctx->screen = pipe->screen;

   if (!_mesa_initialize_context(ctx, api, no_error, visual, shareCtx, &funcs,
                                 options)) {
      align_free(ctx);
      return NULL;
   }

   st_debug_init();

   if (pipe->screen->get_disk_shader_cache)
      ctx->Cache = pipe->screen->get_disk_shader_cache(pipe->screen);

   /* XXX: need a capability bit in gallium to query if the pipe
    * driver prefers DP4 or MUL/MAD for vertex transformation.
    */
   if (debug_get_option_mesa_mvp_dp4())
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = GL_TRUE;

   if (pipe->screen->caps.invalidate_buffer)
      ctx->has_invalidate_buffer = true;

   if (pipe->screen->caps.string_marker)
      ctx->has_string_marker = true;

   st = st_create_context_priv(ctx, pipe, options);
   if (!st) {
      /* st_create_context_priv already tore down its own state; free the
       * core Mesa context here and fall through to return NULL. */
      _mesa_free_context_data(ctx, true);
      align_free(ctx);
   }

   return st;
}
857
858
859 /**
860 * When we destroy a context, we must examine all texture objects to
861 * find/release any sampler views created by that context.
862 *
863 * This callback is called per-texture object. It releases all the
864 * texture's sampler views which belong to the context.
865 */
/* Hash-walk callback: release this context's sampler views held by one
 * texture object ('data'); 'userData' is the st_context being destroyed. */
static void
destroy_tex_sampler_cb(void *data, void *userData)
{
   st_texture_release_context_sampler_view((struct st_context *) userData,
                                           (struct gl_texture_object *) data);
}
874
875 static void
destroy_framebuffer_attachment_sampler_cb(void * data,void * userData)876 destroy_framebuffer_attachment_sampler_cb(void *data, void *userData)
877 {
878 struct gl_framebuffer* glfb = (struct gl_framebuffer*) data;
879 struct st_context *st = (struct st_context *) userData;
880
881 for (unsigned i = 0; i < BUFFER_COUNT; i++) {
882 struct gl_renderbuffer_attachment *att = &glfb->Attachment[i];
883 if (att->Texture) {
884 st_texture_release_context_sampler_view(st, att->Texture);
885 }
886 }
887 }
888
/**
 * Destroy a state-tracker context and its gl_context.  Temporarily makes
 * the dying context current so that object deletion happens in the right
 * context, then restores the previously current context (if any).
 */
void
st_destroy_context(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;
   struct gl_framebuffer *stfb, *next;
   struct gl_framebuffer *save_drawbuffer;
   struct gl_framebuffer *save_readbuffer;

   /* Save the current context and draw/read buffers*/
   GET_CURRENT_CONTEXT(save_ctx);
   if (save_ctx) {
      save_drawbuffer = save_ctx->WinSysDrawBuffer;
      save_readbuffer = save_ctx->WinSysReadBuffer;
   } else {
      save_drawbuffer = save_readbuffer = NULL;
   }

   /*
    * We need to bind the context we're deleting so that
    * _mesa_reference_texobj_() uses this context when deleting textures.
    * Similarly for framebuffer objects, etc.
    */
   _mesa_make_current(ctx, NULL, NULL);

   /* This must be called first so that glthread has a chance to finish */
   _mesa_glthread_destroy(ctx);

   /* Release this context's sampler views for every shared texture object. */
   _mesa_HashWalk(&ctx->Shared->TexObjects, destroy_tex_sampler_cb, st);

   /* For the fallback textures, free any sampler views belonging to this
    * context.
    */
   for (unsigned i = 0; i < NUM_TEXTURE_TARGETS; i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(ctx->Shared->FallbackTex[0]); j++) {
         struct gl_texture_object *stObj =
            ctx->Shared->FallbackTex[i][j];
         if (stObj) {
            st_texture_release_context_sampler_view(st, stObj);
         }
      }
   }

   /* Release the currently bound programs for every stage. */
   st_release_program(st, &st->fp);
   st_release_program(st, &st->gp);
   st_release_program(st, &st->vp);
   st_release_program(st, &st->tcp);
   st_release_program(st, &st->tep);
   st_release_program(st, &st->cp);

   if (st->hw_select_shaders) {
      hash_table_foreach(st->hw_select_shaders, entry)
         st->pipe->delete_gs_state(st->pipe, entry->data);
      _mesa_hash_table_destroy(st->hw_select_shaders, NULL);
   }

   /* release framebuffer in the winsys buffers list */
   LIST_FOR_EACH_ENTRY_SAFE_REV(stfb, next, &st->winsys_buffers, head) {
      _mesa_reference_framebuffer(&stfb, NULL);
   }

   _mesa_HashWalk(&ctx->Shared->FrameBuffers, destroy_framebuffer_attachment_sampler_cb, st);

   pipe_sampler_view_reference(&st->pixel_xfer.pixelmap_sampler_view, NULL);
   pipe_resource_reference(&st->pixel_xfer.pixelmap_texture, NULL);

   _vbo_DestroyContext(ctx);

   st_destroy_program_variants(st);

   /* Do not release debug_output yet because it might be in use by other threads.
    * These threads will be terminated by _mesa_free_context_data and
    * st_destroy_context_priv.
    */
   _mesa_free_context_data(ctx, false);

   st_context_free_zombie_objects(st);

   simple_mtx_destroy(&st->zombie_sampler_views.mutex);
   simple_mtx_destroy(&st->zombie_shaders.mutex);

   /* This will free the st_context too, so 'st' must not be accessed
    * afterwards. */
   st_destroy_context_priv(st, true);
   st = NULL;

   _mesa_destroy_debug_output(ctx);

   align_free(ctx);

   /* NOTE(review): 'ctx' has been freed above; the comparison below uses
    * only the saved pointer value, never dereferencing it — but comparing
    * a freed pointer is technically indeterminate; confirm acceptable. */
   if (save_ctx == ctx) {
      /* unbind the context we just deleted */
      _mesa_make_current(NULL, NULL, NULL);
   } else {
      /* Restore the current context and draw/read buffers (may be NULL) */
      _mesa_make_current(save_ctx, save_drawbuffer, save_readbuffer);
   }
}
986
987 const struct nir_shader_compiler_options *
st_get_nir_compiler_options(struct st_context * st,gl_shader_stage stage)988 st_get_nir_compiler_options(struct st_context *st, gl_shader_stage stage)
989 {
990 return st->ctx->Const.ShaderCompilerOptions[stage].NirOptions;
991 }
992