/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/accum.h"
#include "main/context.h"
#include "main/debug_output.h"
#include "main/framebuffer.h"
#include "main/glthread.h"
#include "main/shaderobj.h"
#include "main/state.h"
#include "main/version.h"
#include "main/hash.h"
#include "program/prog_cache.h"
#include "vbo/vbo.h"
#include "glapi/glapi.h"
#include "st_manager.h"
#include "st_context.h"
#include "st_debug.h"
#include "st_cb_bitmap.h"
#include "st_cb_clear.h"
#include "st_cb_drawpixels.h"
#include "st_cb_drawtex.h"
#include "st_cb_eglimage.h"
#include "st_cb_feedback.h"
#include "st_cb_flush.h"
#include "st_atom.h"
#include "st_draw.h"
#include "st_extensions.h"
#include "st_gen_mipmap.h"
#include "st_pbo.h"
#include "st_program.h"
#include "st_sampler_view.h"
#include "st_shader_cache.h"
#include "st_texture.h"
#include "st_util.h"
#include "pipe/p_context.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_vbuf.h"
#include "util/u_memory.h"
#include "util/hash_table.h"
#include "cso_cache/cso_context.h"
#include "compiler/glsl/glsl_parser_extras.h"
#include "nir/nir_to_tgsi.h"

DEBUG_GET_ONCE_BOOL_OPTION(mesa_mvp_dp4, "MESA_MVP_DP4", FALSE)

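/**
 * Return the "active states" mask: the ST_NEW_* flags affected by the
 * currently bound shader programs, with all non-shader-resource states
 * forced on so that only unused shader resources get masked out later.
 */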
static uint64_t
st_get_active_states(struct gl_context *ctx)
{
   struct gl_program *vp = ctx->VertexProgram._Current;
   struct gl_program *tcp = ctx->TessCtrlProgram._Current;
   struct gl_program *tep = ctx->TessEvalProgram._Current;
   struct gl_program *gp = ctx->GeometryProgram._Current;
   struct gl_program *fp = ctx->FragmentProgram._Current;
   struct gl_program *cp = ctx->ComputeProgram._Current;
   uint64_t active_shader_states = 0;

   if (vp)
      active_shader_states |= vp->affected_states;
   if (tcp)
      active_shader_states |= tcp->affected_states;
   if (tep)
      active_shader_states |= tep->affected_states;
   if (gp)
      active_shader_states |= gp->affected_states;
   if (fp)
      active_shader_states |= fp->affected_states;
   if (cp)
      active_shader_states |= cp->affected_states;

   /* Mark non-shader-resource shader states as "always active". */
   return active_shader_states | ~ST_ALL_SHADER_RESOURCES;
}


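/**
 * Mark all framebuffer-dependent state atoms dirty.  Called when the
 * drawable/framebuffer changes (_NEW_BUFFERS).
 */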
void
st_invalidate_buffers(struct st_context *st)
{
   st->dirty |= ST_NEW_BLEND |
                ST_NEW_DSA |
                ST_NEW_FB_STATE |
                ST_NEW_SAMPLE_STATE |
                ST_NEW_SAMPLE_SHADING |
                ST_NEW_FS_STATE |
                ST_NEW_POLY_STIPPLE |
                ST_NEW_VIEWPORT |
                ST_NEW_RASTERIZER |
                ST_NEW_SCISSOR |
                ST_NEW_WINDOW_RECTANGLES;
}


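/**
 * Return true if the current vertex program reads any attributes that are
 * sourced from current (constant) values rather than vertex arrays.
 */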
static inline bool
st_vp_uses_current_values(const struct gl_context *ctx)
{
   const uint64_t inputs = ctx->VertexProgram._Current->info.inputs_read;
   return _mesa_draw_current_bits(ctx) & inputs;
}


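/**
 * Translate Mesa's _NEW_* state flags (ctx->NewState) into the state
 * tracker's ST_NEW_* dirty flags.
 */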
void
st_invalidate_state(struct gl_context *ctx)
{
   GLbitfield new_state = ctx->NewState;
   struct st_context *st = st_context(ctx);

   if (new_state & _NEW_BUFFERS) {
      st_invalidate_buffers(st);
   } else {
      /* These set a subset of flags set by _NEW_BUFFERS, so we only have to
       * check them when _NEW_BUFFERS isn't set.
       */
      if (new_state & _NEW_PROGRAM)
         st->dirty |= ST_NEW_RASTERIZER;

      if (new_state & _NEW_FOG)
         st->dirty |= ST_NEW_FS_STATE;
   }

   if (new_state & (_NEW_LIGHT_STATE |
                    _NEW_POINT))
      st->dirty |= ST_NEW_RASTERIZER;

   if ((new_state & _NEW_LIGHT_STATE) &&
       (st->lower_flatshade || st->lower_two_sided_color))
      st->dirty |= ST_NEW_FS_STATE;

   if (new_state & _NEW_PROJECTION &&
       st_user_clip_planes_enabled(ctx))
      st->dirty |= ST_NEW_CLIP_STATE;

   if (new_state & _NEW_POINT && st->lower_texcoord_replace)
      st->dirty |= ST_NEW_FS_STATE;

   if (new_state & _NEW_PIXEL)
      st->dirty |= ST_NEW_PIXEL_TRANSFER;

   if (new_state & _NEW_CURRENT_ATTRIB && st_vp_uses_current_values(ctx)) {
      st->dirty |= ST_NEW_VERTEX_ARRAYS;
      /* glColor3f -> glColor4f changes the vertex format. */
      ctx->Array.NewVertexElements = true;
   }

   /* Update the vertex shader if ctx->Light._ClampVertexColor was changed. */
   if (st->clamp_vert_color_in_shader && (new_state & _NEW_LIGHT_STATE)) {
      st->dirty |= ST_NEW_VS_STATE;
      if (st->ctx->API == API_OPENGL_COMPAT && ctx->Version >= 32) {
         st->dirty |= ST_NEW_GS_STATE | ST_NEW_TES_STATE;
      }
   }

   /* Update the vertex shader if ctx->Point was changed. */
   if (st->lower_point_size && new_state & _NEW_POINT) {
      if (ctx->GeometryProgram._Current)
         st->dirty |= ST_NEW_GS_STATE | ST_NEW_GS_CONSTANTS;
      else if (ctx->TessEvalProgram._Current)
         st->dirty |= ST_NEW_TES_STATE | ST_NEW_TES_CONSTANTS;
      else
         st->dirty |= ST_NEW_VS_STATE | ST_NEW_VS_CONSTANTS;
   }

   /* Which shaders are dirty will be determined manually. */
   if (new_state & _NEW_PROGRAM) {
      st->gfx_shaders_may_be_dirty = true;
      st->compute_shader_may_be_dirty = true;
      /* This will mask out unused shader resources. */
      st->active_states = st_get_active_states(ctx);
   }

   if (new_state & _NEW_TEXTURE_OBJECT) {
      st->dirty |= st->active_states &
                   (ST_NEW_SAMPLER_VIEWS |
                    ST_NEW_SAMPLERS |
                    ST_NEW_IMAGE_UNITS);
      if (ctx->FragmentProgram._Current) {
         struct gl_program *fp = ctx->FragmentProgram._Current;

         if (fp->ExternalSamplersUsed || fp->ati_fs)
            st->dirty |= ST_NEW_FS_STATE;
      }
   }
}


/*
 * In some circumstances (such as running google-chrome) the state
 * tracker may try to delete a resource view from a context different
 * from the one it was created with.  We don't want to do that.
 *
 * In that situation, st_texture_release_all_sampler_views() calls this
 * function to transfer the sampler view reference to this context (expected
 * to be the context which created the view.)
 */
void
st_save_zombie_sampler_view(struct st_context *st,
                            struct pipe_sampler_view *view)
{
   struct st_zombie_sampler_view_node *entry;

   assert(view->context == st->pipe);

   entry = MALLOC_STRUCT(st_zombie_sampler_view_node);
   if (!entry)
      return;

   entry->view = view;

   /* We need a mutex since this function may be called from one thread
    * while free_zombie_resource_views() is called from another.
    */
   simple_mtx_lock(&st->zombie_sampler_views.mutex);
   list_addtail(&entry->node, &st->zombie_sampler_views.list.node);
   simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}


/*
 * Since OpenGL shaders may be shared among contexts, we can wind up
 * with variants of a shader created with different contexts.
 * When we go to destroy a gallium shader, we want to free it with the
 * same context that it was created with, unless the driver reports
 * PIPE_CAP_SHAREABLE_SHADERS = TRUE.
 */
void
st_save_zombie_shader(struct st_context *st,
                      enum pipe_shader_type type,
                      struct pipe_shader_state *shader)
{
   struct st_zombie_shader_node *entry;

   /* we shouldn't be here if the driver supports shareable shaders */
   assert(!st->has_shareable_shaders);

   entry = MALLOC_STRUCT(st_zombie_shader_node);
   if (!entry)
      return;

   entry->shader = shader;
   entry->type = type;

   /* We need a mutex since this function may be called from one thread
    * while free_zombie_shaders() is called from another.
    */
   simple_mtx_lock(&st->zombie_shaders.mutex);
   list_addtail(&entry->node, &st->zombie_shaders.list.node);
   simple_mtx_unlock(&st->zombie_shaders.mutex);
}


/*
 * Free any zombie sampler views that may be attached to this context.
 */
static void
free_zombie_sampler_views(struct st_context *st)
{
   struct st_zombie_sampler_view_node *entry, *next;

   if (list_is_empty(&st->zombie_sampler_views.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_sampler_views.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_sampler_views.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      assert(entry->view->context == st->pipe);
      pipe_sampler_view_reference(&entry->view, NULL);

      free(entry);
   }

   assert(list_is_empty(&st->zombie_sampler_views.list.node));

   simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}


/*
 * Free any zombie shaders that may be attached to this context.
 */
static void
free_zombie_shaders(struct st_context *st)
{
   struct st_zombie_shader_node *entry, *next;

   if (list_is_empty(&st->zombie_shaders.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_shaders.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_shaders.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      switch (entry->type) {
      case PIPE_SHADER_VERTEX:
         st->pipe->bind_vs_state(st->pipe, NULL);
         st->pipe->delete_vs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_FRAGMENT:
         st->pipe->bind_fs_state(st->pipe, NULL);
         st->pipe->delete_fs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_GEOMETRY:
         st->pipe->bind_gs_state(st->pipe, NULL);
         st->pipe->delete_gs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_CTRL:
         st->pipe->bind_tcs_state(st->pipe, NULL);
         st->pipe->delete_tcs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_EVAL:
         st->pipe->bind_tes_state(st->pipe, NULL);
         st->pipe->delete_tes_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_COMPUTE:
         st->pipe->bind_compute_state(st->pipe, NULL);
         st->pipe->delete_compute_state(st->pipe, entry->shader);
         break;
      default:
         unreachable("invalid shader type in free_zombie_shaders()");
      }
      free(entry);
   }

   assert(list_is_empty(&st->zombie_shaders.list.node));

   simple_mtx_unlock(&st->zombie_shaders.mutex);
}


/*
 * This function is called periodically to free any zombie objects
 * which are attached to this context.
 */
void
st_context_free_zombie_objects(struct st_context *st)
{
   free_zombie_sampler_views(st);
   free_zombie_shaders(st);
}


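/**
 * Free all context-private state tracker data (atoms, helpers, caches) and
 * the st_context itself.  The pipe context is destroyed as well when
 * destroy_pipe is true.
 */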
static void
st_destroy_context_priv(struct st_context *st, bool destroy_pipe)
{
   st_destroy_atoms(st);
   st_destroy_draw(st);
   st_destroy_clear(st);
   st_destroy_bitmap(st);
   st_destroy_drawpix(st);
   st_destroy_drawtex(st);
   st_destroy_pbo_helpers(st);
   st_destroy_bound_texture_handles(st);
   st_destroy_bound_image_handles(st);

   /* free glReadPixels cache data */
   st_invalidate_readpix_cache(st);
   util_throttle_deinit(st->screen, &st->throttle);

   cso_destroy_context(st->cso_context);

   if (st->pipe && destroy_pipe)
      st->pipe->destroy(st->pipe);

   FREE(st);
}


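/**
 * Initialize ctx->DriverFlags: for each kind of Mesa state change, record
 * which ST_NEW_* dirty flags the state tracker wants set, taking the
 * driver's capabilities (HW atomics, lowered alpha test, etc.) into account.
 */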
static void
st_init_driver_flags(struct st_context *st)
{
   struct gl_driver_flags *f = &st->ctx->DriverFlags;

   /* Shader resources */
   if (st->has_hw_atomics)
      f->NewAtomicBuffer = ST_NEW_HW_ATOMICS | ST_NEW_CS_ATOMICS;
   else
      f->NewAtomicBuffer = ST_NEW_ATOMIC_BUFFER;

   f->NewShaderConstants[MESA_SHADER_VERTEX] = ST_NEW_VS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_CTRL] = ST_NEW_TCS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_EVAL] = ST_NEW_TES_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_GEOMETRY] = ST_NEW_GS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_FRAGMENT] = ST_NEW_FS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_COMPUTE] = ST_NEW_CS_CONSTANTS;

   if (st->lower_alpha_test)
      f->NewAlphaTest = ST_NEW_FS_STATE | ST_NEW_FS_CONSTANTS;
   else
      f->NewAlphaTest = ST_NEW_DSA;

   f->NewMultisampleEnable = ST_NEW_BLEND | ST_NEW_RASTERIZER |
                             ST_NEW_SAMPLE_STATE | ST_NEW_SAMPLE_SHADING;
   f->NewSampleShading = ST_NEW_SAMPLE_SHADING;

   /* This depends on what the gallium driver wants. */
   if (st->force_persample_in_shader) {
      f->NewMultisampleEnable |= ST_NEW_FS_STATE;
      f->NewSampleShading |= ST_NEW_FS_STATE;
   } else {
      f->NewSampleShading |= ST_NEW_RASTERIZER;
   }

   if (st->clamp_frag_color_in_shader) {
      f->NewFragClamp = ST_NEW_FS_STATE;
   } else {
      f->NewFragClamp = ST_NEW_RASTERIZER;
   }

   f->NewClipPlaneEnable = ST_NEW_RASTERIZER;
   if (st->lower_ucp)
      f->NewClipPlaneEnable |= ST_NEW_VS_STATE | ST_NEW_GS_STATE | ST_NEW_TES_STATE;

   if (st->emulate_gl_clamp)
      f->NewSamplersWithClamp = ST_NEW_SAMPLERS |
                                ST_NEW_VS_STATE | ST_NEW_TCS_STATE |
                                ST_NEW_TES_STATE | ST_NEW_GS_STATE |
                                ST_NEW_FS_STATE | ST_NEW_CS_STATE;

   if (!st->has_hw_atomics && st->ctx->Const.ShaderStorageBufferOffsetAlignment > 4)
      f->NewAtomicBuffer |= ST_NEW_CONSTANTS;
}

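/**
 * Return true if the pipe screen exposes driver queries and at least one
 * driver query group, which is what GL_AMD_performance_monitor needs.
 */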
static bool
st_have_perfmon(struct st_context *st)
{
   struct pipe_screen *screen = st->screen;

   if (!screen->get_driver_query_info || !screen->get_driver_query_group_info)
      return false;

   return screen->get_driver_query_group_info(screen, 0, NULL) != 0;
}

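/**
 * Return true if the pipe context implements all of the Intel performance
 * query hooks required for GL_INTEL_performance_query.
 */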
static bool
st_have_perfquery(struct st_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;

   return pipe->init_intel_perf_query_info && pipe->get_intel_perf_query_info &&
          pipe->get_intel_perf_query_counter_info &&
          pipe->new_intel_perf_query_obj && pipe->begin_intel_perf_query &&
          pipe->end_intel_perf_query && pipe->delete_intel_perf_query &&
          pipe->wait_intel_perf_query && pipe->is_intel_perf_query_ready &&
          pipe->get_intel_perf_query_data;
}

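/**
 * Create and initialize the st_context for an already-created gl_context
 * and pipe context: query screen capabilities, create the CSO context and
 * helper modules, and set up GL limits and extensions.  Returns NULL on
 * failure (e.g. the requested GL version cannot be supported).
 */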
static struct st_context *
st_create_context_priv(struct gl_context *ctx, struct pipe_context *pipe,
                       const struct st_config_options *options)
{
   struct pipe_screen *screen = pipe->screen;
   struct st_context *st = CALLOC_STRUCT(st_context);

   if (!st)
      return NULL;

   st->options = *options;

   ctx->st_opts = &st->options;
   ctx->st = st;

   st->ctx = ctx;
   st->screen = screen;
   st->pipe = pipe;
   st->dirty = ST_ALL_STATES_MASK;

   st->can_bind_const_buffer_as_vertex =
      screen->get_param(screen, PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX);

   /* st/mesa always uploads zero-stride vertex attribs, and other user
    * vertex buffers are only possible with a compatibility profile.
    * So tell the u_vbuf module that user VBOs are not possible with the Core
    * profile, so that u_vbuf is bypassed completely if there is nothing else
    * to do.
    */
   unsigned cso_flags;
   switch (ctx->API) {
   case API_OPENGL_CORE:
      cso_flags = CSO_NO_USER_VERTEX_BUFFERS;
      break;
   case API_OPENGLES:
   case API_OPENGLES2:
      cso_flags = CSO_NO_64B_VERTEX_BUFFERS;
      break;
   default:
      cso_flags = 0;
      break;
   }

   st->cso_context = cso_create_context(pipe, cso_flags);
   ctx->cso_context = st->cso_context;

   st_init_atoms(st);
   st_init_clear(st);
   {
      enum pipe_texture_transfer_mode val = screen->get_param(screen, PIPE_CAP_TEXTURE_TRANSFER_MODES);
      st->prefer_blit_based_texture_transfer = (val & PIPE_TEXTURE_TRANSFER_BLIT) != 0;
      st->allow_compute_based_texture_transfer = (val & PIPE_TEXTURE_TRANSFER_COMPUTE) != 0;
   }
   st_init_pbo_helpers(st);

   /* Choose texture target for glDrawPixels, glBitmap, renderbuffers */
   if (screen->get_param(screen, PIPE_CAP_NPOT_TEXTURES))
      st->internal_target = PIPE_TEXTURE_2D;
   else
      st->internal_target = PIPE_TEXTURE_RECT;

   /* Set up vertex element info for 'struct st_util_vertex'.
    */
   {
      STATIC_ASSERT(sizeof(struct st_util_vertex) == 9 * sizeof(float));

      memset(&st->util_velems, 0, sizeof(st->util_velems));
      st->util_velems.velems[0].src_offset = 0;
      st->util_velems.velems[0].vertex_buffer_index = 0;
      st->util_velems.velems[0].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
      st->util_velems.velems[1].src_offset = 3 * sizeof(float);
      st->util_velems.velems[1].vertex_buffer_index = 0;
      st->util_velems.velems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
      st->util_velems.velems[2].src_offset = 7 * sizeof(float);
      st->util_velems.velems[2].vertex_buffer_index = 0;
      st->util_velems.velems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
   }

   ctx->Const.PackedDriverUniformStorage =
      screen->get_param(screen, PIPE_CAP_PACKED_UNIFORMS);

   ctx->Const.BitmapUsesRed =
      screen->is_format_supported(screen, PIPE_FORMAT_R8_UNORM,
                                  PIPE_TEXTURE_2D, 0, 0,
                                  PIPE_BIND_SAMPLER_VIEW);

   st->has_stencil_export =
      screen->get_param(screen, PIPE_CAP_SHADER_STENCIL_EXPORT);
   st->has_etc1 = screen->is_format_supported(screen, PIPE_FORMAT_ETC1_RGB8,
                                              PIPE_TEXTURE_2D, 0, 0,
                                              PIPE_BIND_SAMPLER_VIEW);
   st->has_etc2 = screen->is_format_supported(screen, PIPE_FORMAT_ETC2_RGB8,
                                              PIPE_TEXTURE_2D, 0, 0,
                                              PIPE_BIND_SAMPLER_VIEW);
   st->transcode_etc = options->transcode_etc &&
                       screen->is_format_supported(screen, PIPE_FORMAT_DXT1_SRGBA,
                                                   PIPE_TEXTURE_2D, 0, 0,
                                                   PIPE_BIND_SAMPLER_VIEW);
   st->transcode_astc = options->transcode_astc &&
                        screen->is_format_supported(screen, PIPE_FORMAT_DXT5_SRGBA,
                                                    PIPE_TEXTURE_2D, 0, 0,
                                                    PIPE_BIND_SAMPLER_VIEW) &&
                        screen->is_format_supported(screen, PIPE_FORMAT_DXT5_RGBA,
                                                    PIPE_TEXTURE_2D, 0, 0,
                                                    PIPE_BIND_SAMPLER_VIEW);
   st->has_astc_2d_ldr =
      screen->is_format_supported(screen, PIPE_FORMAT_ASTC_4x4_SRGB,
                                  PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
   st->has_astc_5x5_ldr =
      screen->is_format_supported(screen, PIPE_FORMAT_ASTC_5x5_SRGB,
                                  PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
   st->force_persample_in_shader =
      screen->get_param(screen, PIPE_CAP_SAMPLE_SHADING) &&
      !screen->get_param(screen, PIPE_CAP_FORCE_PERSAMPLE_INTERP);
   st->has_shareable_shaders = screen->get_param(screen,
                                                 PIPE_CAP_SHAREABLE_SHADERS);
   st->needs_texcoord_semantic =
      screen->get_param(screen, PIPE_CAP_TGSI_TEXCOORD);
   st->apply_texture_swizzle_to_border_color =
      !!(screen->get_param(screen, PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
         (PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50 |
          PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600));
   st->use_format_with_border_color =
      !!(screen->get_param(screen, PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
         PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO);
   st->emulate_gl_clamp =
      !screen->get_param(screen, PIPE_CAP_GL_CLAMP);
   st->texture_buffer_sampler =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_SAMPLER);
   st->has_time_elapsed =
      screen->get_param(screen, PIPE_CAP_QUERY_TIME_ELAPSED);
   st->has_half_float_packing =
      screen->get_param(screen, PIPE_CAP_SHADER_PACK_HALF_FLOAT);
   st->has_multi_draw_indirect =
      screen->get_param(screen, PIPE_CAP_MULTI_DRAW_INDIRECT);
   st->has_indirect_partial_stride =
      screen->get_param(screen, PIPE_CAP_MULTI_DRAW_INDIRECT_PARTIAL_STRIDE);
   st->has_single_pipe_stat =
      screen->get_param(screen, PIPE_CAP_QUERY_PIPELINE_STATISTICS_SINGLE);
   st->has_indep_blend_func =
      screen->get_param(screen, PIPE_CAP_INDEP_BLEND_FUNC);
   st->needs_rgb_dst_alpha_override =
      screen->get_param(screen, PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND);
   st->can_dither =
      screen->get_param(screen, PIPE_CAP_DITHERING);
   st->lower_flatshade =
      !screen->get_param(screen, PIPE_CAP_FLATSHADE);
   st->lower_alpha_test =
      !screen->get_param(screen, PIPE_CAP_ALPHA_TEST);
   st->lower_point_size =
      !screen->get_param(screen, PIPE_CAP_POINT_SIZE_FIXED);
   st->lower_two_sided_color =
      !screen->get_param(screen, PIPE_CAP_TWO_SIDED_COLOR);
   st->lower_ucp =
      !screen->get_param(screen, PIPE_CAP_CLIP_PLANES);
   st->prefer_real_buffer_in_constbuf0 =
      screen->get_param(screen, PIPE_CAP_PREFER_REAL_BUFFER_IN_CONSTBUF0);
   st->has_conditional_render =
      screen->get_param(screen, PIPE_CAP_CONDITIONAL_RENDER);
   st->lower_texcoord_replace =
      !screen->get_param(screen, PIPE_CAP_POINT_SPRITE);
   st->lower_rect_tex =
      !screen->get_param(screen, PIPE_CAP_TEXRECT);
   st->allow_st_finalize_nir_twice = screen->finalize_nir != NULL;

   st->has_hw_atomics =
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS)
      ? true : false;

   util_throttle_init(&st->throttle,
                      screen->get_param(screen,
                                        PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET));

   /* GL limits and extensions */
   st_init_limits(screen, &ctx->Const, &ctx->Extensions);
   st_init_extensions(screen, &ctx->Const,
                      &ctx->Extensions, &st->options, ctx->API);

   if (st_have_perfmon(st)) {
      ctx->Extensions.AMD_performance_monitor = GL_TRUE;
   }

   if (st_have_perfquery(st)) {
      ctx->Extensions.INTEL_performance_query = GL_TRUE;
   }

   /* Enable shader-based fallbacks for ARB_color_buffer_float if needed. */
   if (screen->get_param(screen, PIPE_CAP_VERTEX_COLOR_UNCLAMPED)) {
      if (!screen->get_param(screen, PIPE_CAP_VERTEX_COLOR_CLAMPED)) {
         st->clamp_vert_color_in_shader = GL_TRUE;
      }

      if (!screen->get_param(screen, PIPE_CAP_FRAGMENT_COLOR_CLAMPED)) {
         st->clamp_frag_color_in_shader = GL_TRUE;
      }

      /* For drivers which cannot do color clamping, it's better to just
       * disable ARB_color_buffer_float in the core profile, because
       * the clamping is deprecated there anyway. */
      if (ctx->API == API_OPENGL_CORE &&
          (st->clamp_frag_color_in_shader || st->clamp_vert_color_in_shader)) {
         st->clamp_vert_color_in_shader = GL_FALSE;
         st->clamp_frag_color_in_shader = GL_FALSE;
         ctx->Extensions.ARB_color_buffer_float = GL_FALSE;
      }
   }

   /* Called after _mesa_create_context/_mesa_init_point; fix up the default
    * user-settable max point size.
    */
   ctx->Point.MaxSize = MAX2(ctx->Const.MaxPointSize,
                             ctx->Const.MaxPointSizeAA);

   ctx->Const.PointCoordOriginUpperLeft =
      screen->get_param(screen, PIPE_CAP_POINT_COORD_ORIGIN_UPPER_LEFT);

   ctx->Const.NoClippingOnCopyTex = screen->get_param(screen,
                                                      PIPE_CAP_NO_CLIP_ON_COPY_TEX);

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].PositionAlwaysInvariant = options->vs_position_always_invariant;

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_TESS_EVAL].PositionAlwaysPrecise = options->vs_position_always_precise;

   /* NIR drivers that support tess shaders and compact arrays need to use
    * GLSLTessLevelsAsInputs / PIPE_CAP_GLSL_TESS_LEVELS_AS_INPUTS. The NIR
    * linker doesn't support linking these as compact arrays of sysvals.
    */
   assert(ctx->Const.GLSLTessLevelsAsInputs ||
          !screen->get_param(screen, PIPE_CAP_NIR_COMPACT_ARRAYS) ||
          !ctx->Extensions.ARB_tessellation_shader);

   /* Set which shader types can be compiled at link time. */
   st->shader_has_one_variant[MESA_SHADER_VERTEX] =
      st->has_shareable_shaders &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;

   st->shader_has_one_variant[MESA_SHADER_FRAGMENT] =
      st->has_shareable_shaders &&
      !st->lower_flatshade &&
      !st->lower_alpha_test &&
      !st->clamp_frag_color_in_shader &&
      !st->force_persample_in_shader &&
      !st->lower_two_sided_color &&
      !st->lower_texcoord_replace;

   st->shader_has_one_variant[MESA_SHADER_TESS_CTRL] = st->has_shareable_shaders;
   st->shader_has_one_variant[MESA_SHADER_TESS_EVAL] =
      st->has_shareable_shaders &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;

   st->shader_has_one_variant[MESA_SHADER_GEOMETRY] =
      st->has_shareable_shaders &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;
   st->shader_has_one_variant[MESA_SHADER_COMPUTE] = st->has_shareable_shaders;

   if (util_get_cpu_caps()->num_L3_caches == 1 ||
       !st->pipe->set_context_param)
      st->pin_thread_counter = ST_L3_PINNING_DISABLED;

   st->bitmap.cache.empty = true;

   if (ctx->Const.ForceGLNamesReuse && ctx->Shared->RefCount == 1) {
      _mesa_HashEnableNameReuse(ctx->Shared->TexObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->ShaderObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->BufferObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->SamplerObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->FrameBuffers);
      _mesa_HashEnableNameReuse(ctx->Shared->RenderBuffers);
      _mesa_HashEnableNameReuse(ctx->Shared->MemoryObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->SemaphoreObjects);
   }
   /* SPECviewperf13/sw-04 crashes since a56849ddda6 if Mesa is built with
    * -O3 on gcc 7.5.  The crash doesn't happen with ForceGLNamesReuse, which
    * is the default setting for SPECviewperf because it simulates the glGen
    * behavior of closed source drivers.
    */
   if (ctx->Const.ForceGLNamesReuse)
      _mesa_HashEnableNameReuse(ctx->Query.QueryObjects);

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   if (ctx->Version == 0) {
      /* This can happen when a core profile was requested, but the driver
       * does not support some features of GL 3.1 or later.
       */
      st_destroy_context_priv(st, false);
      return NULL;
   }

   /* This must be done after extensions are initialized to enable persistent
    * mappings immediately.
    */
   _vbo_CreateContext(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   st_init_driver_flags(st);

   /* Initialize context's winsys buffers list */
   list_inithead(&st->winsys_buffers);

   list_inithead(&st->zombie_sampler_views.list.node);
   simple_mtx_init(&st->zombie_sampler_views.mutex, mtx_plain);
   list_inithead(&st->zombie_shaders.list.node);
   simple_mtx_init(&st->zombie_shaders.mutex, mtx_plain);

   ctx->Const.DriverSupportedPrimMask = screen->get_param(screen, PIPE_CAP_SUPPORTED_PRIM_MODES) |
                                        /* PIPE_PRIM_PATCHES is always supported */
                                        BITFIELD_BIT(PIPE_PRIM_PATCHES);

   return st;
}

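/**
 * Forward to the frontend manager's set_background_context hook so that a
 * helper thread can get its own context bound.
 */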
void
st_set_background_context(struct gl_context *ctx,
                          struct util_queue_monitoring *queue_info)
{
   struct st_context *st = ctx->st;
   struct st_manager *smapi =
      (struct st_manager *) st->iface.st_context_private;

   assert(smapi->set_background_context);
   smapi->set_background_context(&st->iface, queue_info);
}

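/**
 * Plug the state tracker's implementations into the dd_function_table
 * used by core Mesa.
 */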
static void
st_init_driver_functions(struct pipe_screen *screen,
                         struct dd_function_table *functions,
                         bool has_egl_image_validate)
{
   st_init_draw_functions(screen, functions);

   st_init_eglimage_functions(functions, has_egl_image_validate);

   functions->NewProgram = _mesa_new_program;
   st_init_flush_functions(screen, functions);

   /* GL_ARB_get_program_binary */
   functions->ShaderCacheSerializeDriverBlob = st_serialise_nir_program;
   functions->ProgramBinarySerializeDriverBlob =
      st_serialise_nir_program_binary;
   functions->ProgramBinaryDeserializeDriverBlob =
      st_deserialise_nir_program;
}


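/**
 * Create a new GL rendering context (gl_context plus st_context) for the
 * given API, pipe context, visual and share context.  Returns NULL on
 * failure.
 */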
struct st_context *
st_create_context(gl_api api, struct pipe_context *pipe,
                  const struct gl_config *visual,
                  struct st_context *share,
                  const struct st_config_options *options,
                  bool no_error, bool has_egl_image_validate)
{
   struct gl_context *ctx;
   struct gl_context *shareCtx = share ? share->ctx : NULL;
   struct dd_function_table funcs;
   struct st_context *st;

   memset(&funcs, 0, sizeof(funcs));
   st_init_driver_functions(pipe->screen, &funcs, has_egl_image_validate);

   /* gl_context must be 16-byte aligned due to the alignment on GLmatrix. */
   ctx = align_malloc(sizeof(struct gl_context), 16);
   if (!ctx)
      return NULL;
   memset(ctx, 0, sizeof(*ctx));

   ctx->pipe = pipe;
   ctx->screen = pipe->screen;

   if (!_mesa_initialize_context(ctx, api, no_error, visual, shareCtx, &funcs)) {
      align_free(ctx);
      return NULL;
   }

   st_debug_init();

   if (pipe->screen->get_disk_shader_cache)
      ctx->Cache = pipe->screen->get_disk_shader_cache(pipe->screen);

   /* XXX: need a capability bit in gallium to query if the pipe
    * driver prefers DP4 or MUL/MAD for vertex transformation.
    */
   if (debug_get_option_mesa_mvp_dp4())
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = GL_TRUE;

   if (pipe->screen->get_param(pipe->screen, PIPE_CAP_INVALIDATE_BUFFER))
      ctx->has_invalidate_buffer = true;

   if (pipe->screen->get_param(pipe->screen, PIPE_CAP_STRING_MARKER))
      ctx->has_string_marker = true;

   st = st_create_context_priv(ctx, pipe, options);
   if (!st) {
      _mesa_free_context_data(ctx, true);
      align_free(ctx);
   }

   return st;
}


/**
 * When we destroy a context, we must examine all texture objects to
 * find/release any sampler views created by that context.
 *
 * This callback is called per-texture object.  It releases all the
 * texture's sampler views which belong to the context.
 */
static void
destroy_tex_sampler_cb(void *data, void *userData)
{
   struct gl_texture_object *texObj = (struct gl_texture_object *) data;
   struct st_context *st = (struct st_context *) userData;

   st_texture_release_context_sampler_view(st, texObj);
}

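/**
 * Callback for each framebuffer object: release the sampler views that this
 * context created for the framebuffer's texture attachments.
 */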
static void
destroy_framebuffer_attachment_sampler_cb(void *data, void *userData)
{
   struct gl_framebuffer *glfb = (struct gl_framebuffer *) data;
   struct st_context *st = (struct st_context *) userData;

   for (unsigned i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer_attachment *att = &glfb->Attachment[i];
      if (att->Texture) {
         st_texture_release_context_sampler_view(st, att->Texture);
      }
   }
}

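/**
 * Destroy the GL context and the st_context: release all sampler views and
 * programs owned by this context, free core Mesa data and the pipe context,
 * then restore the previously bound context.
 */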
void
st_destroy_context(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;
   struct gl_framebuffer *stfb, *next;
   struct gl_framebuffer *save_drawbuffer;
   struct gl_framebuffer *save_readbuffer;

   /* Save the current context and draw/read buffers */
   GET_CURRENT_CONTEXT(save_ctx);
   if (save_ctx) {
      save_drawbuffer = save_ctx->WinSysDrawBuffer;
      save_readbuffer = save_ctx->WinSysReadBuffer;
   } else {
      save_drawbuffer = save_readbuffer = NULL;
   }

   /*
    * We need to bind the context we're deleting so that
    * _mesa_reference_texobj_() uses this context when deleting textures.
    * Similarly for framebuffer objects, etc.
    */
   _mesa_make_current(ctx, NULL, NULL);

   /* This must be called first so that glthread has a chance to finish */
   _mesa_glthread_destroy(ctx, NULL);

   _mesa_HashWalk(ctx->Shared->TexObjects, destroy_tex_sampler_cb, st);

   /* For the fallback textures, free any sampler views belonging to this
    * context.
    */
   for (unsigned i = 0; i < NUM_TEXTURE_TARGETS; i++) {
      struct gl_texture_object *stObj =
         ctx->Shared->FallbackTex[i];
      if (stObj) {
         st_texture_release_context_sampler_view(st, stObj);
      }
   }

   st_context_free_zombie_objects(st);

   simple_mtx_destroy(&st->zombie_sampler_views.mutex);
   simple_mtx_destroy(&st->zombie_shaders.mutex);

   st_release_program(st, &st->fp);
   st_release_program(st, &st->gp);
   st_release_program(st, &st->vp);
   st_release_program(st, &st->tcp);
   st_release_program(st, &st->tep);
   st_release_program(st, &st->cp);

   if (st->hw_select_shaders) {
      hash_table_foreach(st->hw_select_shaders, entry)
         st->pipe->delete_gs_state(st->pipe, entry->data);
      _mesa_hash_table_destroy(st->hw_select_shaders, NULL);
   }

   /* release framebuffers in the winsys buffers list */
   LIST_FOR_EACH_ENTRY_SAFE_REV(stfb, next, &st->winsys_buffers, head) {
      _mesa_reference_framebuffer(&stfb, NULL);
   }

   _mesa_HashWalk(ctx->Shared->FrameBuffers, destroy_framebuffer_attachment_sampler_cb, st);

   pipe_sampler_view_reference(&st->pixel_xfer.pixelmap_sampler_view, NULL);
   pipe_resource_reference(&st->pixel_xfer.pixelmap_texture, NULL);

   _vbo_DestroyContext(ctx);

   st_destroy_program_variants(st);

   /* Do not release debug_output yet because it might be in use by other threads.
    * These threads will be terminated by _mesa_free_context_data and
    * st_destroy_context_priv.
    */
   _mesa_free_context_data(ctx, false);

   /* This will free the st_context too, so 'st' must not be accessed
    * afterwards. */
   st_destroy_context_priv(st, true);
   st = NULL;

   _mesa_destroy_debug_output(ctx);

   align_free(ctx);

   if (save_ctx == ctx) {
      /* unbind the context we just deleted */
      _mesa_make_current(NULL, NULL, NULL);
   } else {
      /* Restore the current context and draw/read buffers (may be NULL) */
      _mesa_make_current(save_ctx, save_drawbuffer, save_readbuffer);
   }
}

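/**
 * Return the NIR compiler options for the given shader stage, falling back
 * to the nir-to-tgsi options when the driver does not provide its own.
 */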
const struct nir_shader_compiler_options *
st_get_nir_compiler_options(struct st_context *st, gl_shader_stage stage)
{
   const struct nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[stage].NirOptions;

   if (options) {
      return options;
   } else {
      return nir_to_tgsi_get_compiler_options(st->screen,
                                              PIPE_SHADER_IR_NIR,
                                              pipe_shader_type_from_mesa(stage));
   }
}
