/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/accum.h"
#include "main/api_exec.h"
#include "main/context.h"
#include "main/debug_output.h"
#include "main/glthread.h"
#include "main/samplerobj.h"
#include "main/shaderobj.h"
#include "main/state.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/hash.h"
#include "program/prog_cache.h"
#include "vbo/vbo.h"
#include "glapi/glapi.h"
#include "st_manager.h"
#include "st_context.h"
#include "st_debug.h"
#include "st_cb_bitmap.h"
#include "st_cb_blit.h"
#include "st_cb_bufferobjects.h"
#include "st_cb_clear.h"
#include "st_cb_compute.h"
#include "st_cb_condrender.h"
#include "st_cb_copyimage.h"
#include "st_cb_drawpixels.h"
#include "st_cb_rasterpos.h"
#include "st_cb_drawtex.h"
#include "st_cb_eglimage.h"
#include "st_cb_fbo.h"
#include "st_cb_feedback.h"
#include "st_cb_memoryobjects.h"
#include "st_cb_msaa.h"
#include "st_cb_perfmon.h"
#include "st_cb_perfquery.h"
#include "st_cb_program.h"
#include "st_cb_queryobj.h"
#include "st_cb_readpixels.h"
#include "st_cb_semaphoreobjects.h"
#include "st_cb_texture.h"
#include "st_cb_xformfb.h"
#include "st_cb_flush.h"
#include "st_cb_syncobj.h"
#include "st_cb_strings.h"
#include "st_cb_texturebarrier.h"
#include "st_cb_viewport.h"
#include "st_atom.h"
#include "st_draw.h"
#include "st_extensions.h"
#include "st_gen_mipmap.h"
#include "st_pbo.h"
#include "st_program.h"
#include "st_sampler_view.h"
#include "st_shader_cache.h"
#include "st_vdpau.h"
#include "st_texture.h"
#include "st_util.h"
#include "pipe/p_context.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_vbuf.h"
#include "util/u_memory.h"
#include "cso_cache/cso_context.h"
#include "compiler/glsl/glsl_parser_extras.h"
#include "nir/nir_to_tgsi.h"

DEBUG_GET_ONCE_BOOL_OPTION(mesa_mvp_dp4, "MESA_MVP_DP4", FALSE)


/**
 * Called via ctx->Driver.Enable()
 */
static void
st_Enable(struct gl_context *ctx, GLenum cap, UNUSED GLboolean state)
{
   struct st_context *st = st_context(ctx);

   switch (cap) {
   case GL_DEBUG_OUTPUT:
   case GL_DEBUG_OUTPUT_SYNCHRONOUS:
      st_update_debug_callback(st);
      break;
   case GL_BLACKHOLE_RENDER_INTEL:
      st->pipe->set_frontend_noop(st->pipe, ctx->IntelBlackholeRender);
      break;
   default:
      break;
   }
}


/**
 * Called via ctx->Driver.QueryMemoryInfo()
 */
static void
st_query_memory_info(struct gl_context *ctx, struct gl_memory_info *out)
{
   struct pipe_screen *screen = st_context(ctx)->screen;
   struct pipe_memory_info info;

   assert(screen->query_memory_info);
   if (!screen->query_memory_info)
      return;

   screen->query_memory_info(screen, &info);

   out->total_device_memory = info.total_device_memory;
   out->avail_device_memory = info.avail_device_memory;
   out->total_staging_memory = info.total_staging_memory;
   out->avail_staging_memory = info.avail_staging_memory;
   out->device_memory_evicted = info.device_memory_evicted;
   out->nr_device_memory_evictions = info.nr_device_memory_evictions;
}

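/**
 * Return the union of the affected_states bitmasks of all currently bound
 * shader programs.  Non-shader-resource states are always marked active.
 */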
static uint64_t
st_get_active_states(struct gl_context *ctx)
{
   struct st_program *vp =
      st_program(ctx->VertexProgram._Current);
   struct st_program *tcp =
      st_program(ctx->TessCtrlProgram._Current);
   struct st_program *tep =
      st_program(ctx->TessEvalProgram._Current);
   struct st_program *gp =
      st_program(ctx->GeometryProgram._Current);
   struct st_program *fp =
      st_program(ctx->FragmentProgram._Current);
   struct st_program *cp =
      st_program(ctx->ComputeProgram._Current);
   uint64_t active_shader_states = 0;

   if (vp)
      active_shader_states |= vp->affected_states;
   if (tcp)
      active_shader_states |= tcp->affected_states;
   if (tep)
      active_shader_states |= tep->affected_states;
   if (gp)
      active_shader_states |= gp->affected_states;
   if (fp)
      active_shader_states |= fp->affected_states;
   if (cp)
      active_shader_states |= cp->affected_states;

   /* Mark non-shader-resource shader states as "always active". */
   return active_shader_states | ~ST_ALL_SHADER_RESOURCES;
}

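/**
 * Mark all framebuffer-dependent state (blend, DSA, FB state, viewport,
 * scissor, etc.) as dirty.
 */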
void
st_invalidate_buffers(struct st_context *st)
{
   st->dirty |= ST_NEW_BLEND |
                ST_NEW_DSA |
                ST_NEW_FB_STATE |
                ST_NEW_SAMPLE_STATE |
                ST_NEW_SAMPLE_SHADING |
                ST_NEW_FS_STATE |
                ST_NEW_POLY_STIPPLE |
                ST_NEW_VIEWPORT |
                ST_NEW_RASTERIZER |
                ST_NEW_SCISSOR |
                ST_NEW_WINDOW_RECTANGLES;
}

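/**
 * Return true if the bound vertex program reads any attributes that are
 * supplied by current (immediate mode) values rather than vertex arrays.
 */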
static inline bool
st_vp_uses_current_values(const struct gl_context *ctx)
{
   const uint64_t inputs = ctx->VertexProgram._Current->info.inputs_read;
   return _mesa_draw_current_bits(ctx) & inputs;
}


/**
 * Called via ctx->Driver.UpdateState()
 */
static void
st_invalidate_state(struct gl_context *ctx)
{
   GLbitfield new_state = ctx->NewState;
   struct st_context *st = st_context(ctx);

   if (new_state & _NEW_BUFFERS) {
      st_invalidate_buffers(st);
   } else {
      /* These set a subset of flags set by _NEW_BUFFERS, so we only have to
       * check them when _NEW_BUFFERS isn't set.
       */
      if (new_state & _NEW_PROGRAM)
         st->dirty |= ST_NEW_RASTERIZER;

      if (new_state & _NEW_FOG)
         st->dirty |= ST_NEW_FS_STATE;
   }

   if (new_state & (_NEW_LIGHT_STATE |
                    _NEW_POINT))
      st->dirty |= ST_NEW_RASTERIZER;

   if ((new_state & _NEW_LIGHT_STATE) &&
       (st->lower_flatshade || st->lower_two_sided_color))
      st->dirty |= ST_NEW_FS_STATE;

   if (new_state & _NEW_PROJECTION &&
       st_user_clip_planes_enabled(ctx))
      st->dirty |= ST_NEW_CLIP_STATE;

   if (new_state & _NEW_POINT && st->lower_texcoord_replace)
      st->dirty |= ST_NEW_FS_STATE;

   if (new_state & _NEW_PIXEL)
      st->dirty |= ST_NEW_PIXEL_TRANSFER;

   if (new_state & _NEW_CURRENT_ATTRIB && st_vp_uses_current_values(ctx))
      st->dirty |= ST_NEW_VERTEX_ARRAYS;

   if (st->clamp_frag_depth_in_shader && (new_state & _NEW_VIEWPORT)) {
      if (ctx->GeometryProgram._Current)
         st->dirty |= ST_NEW_GS_CONSTANTS;
      else if (ctx->TessEvalProgram._Current)
         st->dirty |= ST_NEW_TES_CONSTANTS;
      else
         st->dirty |= ST_NEW_VS_CONSTANTS;
      st->dirty |= ST_NEW_FS_CONSTANTS;
   }

   /* Update the vertex shader if ctx->Light._ClampVertexColor was changed. */
   if (st->clamp_vert_color_in_shader && (new_state & _NEW_LIGHT_STATE)) {
      st->dirty |= ST_NEW_VS_STATE;
      if (st->ctx->API == API_OPENGL_COMPAT && ctx->Version >= 32) {
         st->dirty |= ST_NEW_GS_STATE | ST_NEW_TES_STATE;
      }
   }

   /* Update the vertex shader if ctx->Point was changed. */
   if (st->lower_point_size && new_state & _NEW_POINT)
      st->dirty |= ST_NEW_VS_STATE | ST_NEW_TES_STATE | ST_NEW_GS_STATE;

   /* Which shaders are dirty will be determined manually. */
   if (new_state & _NEW_PROGRAM) {
      st->gfx_shaders_may_be_dirty = true;
      st->compute_shader_may_be_dirty = true;
      /* This will mask out unused shader resources. */
      st->active_states = st_get_active_states(ctx);
   }

   if (new_state & _NEW_TEXTURE_OBJECT) {
      st->dirty |= st->active_states &
                   (ST_NEW_SAMPLER_VIEWS |
                    ST_NEW_SAMPLERS |
                    ST_NEW_IMAGE_UNITS);
      if (ctx->FragmentProgram._Current) {
         struct st_program *stfp = st_program(ctx->FragmentProgram._Current);

         if (stfp->Base.ExternalSamplersUsed || stfp->ati_fs)
            st->dirty |= ST_NEW_FS_STATE;
      }
   }
}


/*
 * In some circumstances (such as running google-chrome) the state
 * tracker may try to delete a resource view from a context different
 * than when it was created. We don't want to do that.
 *
 * In that situation, st_texture_release_all_sampler_views() calls this
 * function to transfer the sampler view reference to this context (expected
 * to be the context which created the view.)
 */
void
st_save_zombie_sampler_view(struct st_context *st,
                            struct pipe_sampler_view *view)
{
   struct st_zombie_sampler_view_node *entry;

   assert(view->context == st->pipe);

   entry = MALLOC_STRUCT(st_zombie_sampler_view_node);
   if (!entry)
      return;

   entry->view = view;

   /* We need a mutex since this function may be called from one thread
    * while free_zombie_resource_views() is called from another.
    */
   simple_mtx_lock(&st->zombie_sampler_views.mutex);
   list_addtail(&entry->node, &st->zombie_sampler_views.list.node);
   simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}


/*
 * Since OpenGL shaders may be shared among contexts, we can wind up
 * with variants of a shader created with different contexts.
 * When we go to destroy a gallium shader, we want to free it with the
 * same context that it was created with, unless the driver reports
 * PIPE_CAP_SHAREABLE_SHADERS = TRUE.
 */
void
st_save_zombie_shader(struct st_context *st,
                      enum pipe_shader_type type,
                      struct pipe_shader_state *shader)
{
   struct st_zombie_shader_node *entry;

   /* we shouldn't be here if the driver supports shareable shaders */
   assert(!st->has_shareable_shaders);

   entry = MALLOC_STRUCT(st_zombie_shader_node);
   if (!entry)
      return;

   entry->shader = shader;
   entry->type = type;

   /* We need a mutex since this function may be called from one thread
    * while free_zombie_shaders() is called from another.
    */
   simple_mtx_lock(&st->zombie_shaders.mutex);
   list_addtail(&entry->node, &st->zombie_shaders.list.node);
   simple_mtx_unlock(&st->zombie_shaders.mutex);
}


/*
 * Free any zombie sampler views that may be attached to this context.
 */
static void
free_zombie_sampler_views(struct st_context *st)
{
   struct st_zombie_sampler_view_node *entry, *next;

   if (list_is_empty(&st->zombie_sampler_views.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_sampler_views.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_sampler_views.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      assert(entry->view->context == st->pipe);
      pipe_sampler_view_reference(&entry->view, NULL);

      free(entry);
   }

   assert(list_is_empty(&st->zombie_sampler_views.list.node));

   simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}


/*
 * Free any zombie shaders that may be attached to this context.
 */
static void
free_zombie_shaders(struct st_context *st)
{
   struct st_zombie_shader_node *entry, *next;

   if (list_is_empty(&st->zombie_shaders.list.node)) {
      return;
   }

   simple_mtx_lock(&st->zombie_shaders.mutex);

   LIST_FOR_EACH_ENTRY_SAFE(entry, next,
                            &st->zombie_shaders.list.node, node) {
      list_del(&entry->node);  // remove this entry from the list

      switch (entry->type) {
      case PIPE_SHADER_VERTEX:
         st->pipe->bind_vs_state(st->pipe, NULL);
         st->pipe->delete_vs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_FRAGMENT:
         st->pipe->bind_fs_state(st->pipe, NULL);
         st->pipe->delete_fs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_GEOMETRY:
         st->pipe->bind_gs_state(st->pipe, NULL);
         st->pipe->delete_gs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_CTRL:
         st->pipe->bind_tcs_state(st->pipe, NULL);
         st->pipe->delete_tcs_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_TESS_EVAL:
         st->pipe->bind_tes_state(st->pipe, NULL);
         st->pipe->delete_tes_state(st->pipe, entry->shader);
         break;
      case PIPE_SHADER_COMPUTE:
         st->pipe->bind_compute_state(st->pipe, NULL);
         st->pipe->delete_compute_state(st->pipe, entry->shader);
         break;
      default:
         unreachable("invalid shader type in free_zombie_shaders()");
      }
      free(entry);
   }

   assert(list_is_empty(&st->zombie_shaders.list.node));

   simple_mtx_unlock(&st->zombie_shaders.mutex);
}


/*
 * This function is called periodically to free any zombie objects
 * which are attached to this context.
 */
void
st_context_free_zombie_objects(struct st_context *st)
{
   free_zombie_sampler_views(st);
   free_zombie_shaders(st);
}

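/**
 * Free everything allocated by st_create_context_priv() and, if
 * destroy_pipe is set, the gallium pipe context as well.
 */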
static void
st_destroy_context_priv(struct st_context *st, bool destroy_pipe)
{
   st_destroy_atoms(st);
   st_destroy_draw(st);
   st_destroy_clear(st);
   st_destroy_bitmap(st);
   st_destroy_drawpix(st);
   st_destroy_drawtex(st);
   st_destroy_perfmon(st);
   st_destroy_pbo_helpers(st);
   st_destroy_bound_texture_handles(st);
   st_destroy_bound_image_handles(st);

   /* free glReadPixels cache data */
   st_invalidate_readpix_cache(st);
   util_throttle_deinit(st->screen, &st->throttle);

   cso_destroy_context(st->cso_context);

   if (st->pipe && destroy_pipe)
      st->pipe->destroy(st->pipe);

   free(st);
}

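/**
 * Map core Mesa's gl_driver_flags to the corresponding ST_NEW_* dirty bits,
 * taking into account which shader-based fallbacks this context uses.
 */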
static void
st_init_driver_flags(struct st_context *st)
{
   struct gl_driver_flags *f = &st->ctx->DriverFlags;

   f->NewArray = ST_NEW_VERTEX_ARRAYS;
   f->NewRasterizerDiscard = ST_NEW_RASTERIZER;
   f->NewTileRasterOrder = ST_NEW_RASTERIZER;
   f->NewUniformBuffer = ST_NEW_UNIFORM_BUFFER;
   f->NewTessState = ST_NEW_TESS_STATE;

   /* Shader resources */
   f->NewTextureBuffer = ST_NEW_SAMPLER_VIEWS;
   if (st->has_hw_atomics)
      f->NewAtomicBuffer = ST_NEW_HW_ATOMICS | ST_NEW_CS_ATOMICS;
   else
      f->NewAtomicBuffer = ST_NEW_ATOMIC_BUFFER;
   f->NewShaderStorageBuffer = ST_NEW_STORAGE_BUFFER;
   f->NewImageUnits = ST_NEW_IMAGE_UNITS;

   f->NewShaderConstants[MESA_SHADER_VERTEX] = ST_NEW_VS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_CTRL] = ST_NEW_TCS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_TESS_EVAL] = ST_NEW_TES_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_GEOMETRY] = ST_NEW_GS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_FRAGMENT] = ST_NEW_FS_CONSTANTS;
   f->NewShaderConstants[MESA_SHADER_COMPUTE] = ST_NEW_CS_CONSTANTS;

   f->NewWindowRectangles = ST_NEW_WINDOW_RECTANGLES;
   f->NewFramebufferSRGB = ST_NEW_FB_STATE;
   f->NewScissorRect = ST_NEW_SCISSOR;
   f->NewScissorTest = ST_NEW_SCISSOR | ST_NEW_RASTERIZER;

   if (st->lower_alpha_test)
      f->NewAlphaTest = ST_NEW_FS_STATE | ST_NEW_FS_CONSTANTS;
   else
      f->NewAlphaTest = ST_NEW_DSA;

   f->NewBlend = ST_NEW_BLEND;
   f->NewBlendColor = ST_NEW_BLEND_COLOR;
   f->NewColorMask = ST_NEW_BLEND;
   f->NewDepth = ST_NEW_DSA;
   f->NewLogicOp = ST_NEW_BLEND;
   f->NewStencil = ST_NEW_DSA;
   f->NewMultisampleEnable = ST_NEW_BLEND | ST_NEW_RASTERIZER |
                             ST_NEW_SAMPLE_STATE | ST_NEW_SAMPLE_SHADING;
   f->NewSampleAlphaToXEnable = ST_NEW_BLEND;
   f->NewSampleMask = ST_NEW_SAMPLE_STATE;
   f->NewSampleLocations = ST_NEW_SAMPLE_STATE;
   f->NewSampleShading = ST_NEW_SAMPLE_SHADING;

   /* This depends on what the gallium driver wants. */
   if (st->force_persample_in_shader) {
      f->NewMultisampleEnable |= ST_NEW_FS_STATE;
      f->NewSampleShading |= ST_NEW_FS_STATE;
   } else {
      f->NewSampleShading |= ST_NEW_RASTERIZER;
   }

   f->NewClipControl = ST_NEW_VIEWPORT | ST_NEW_RASTERIZER;
   f->NewClipPlane = ST_NEW_CLIP_STATE;

   if (st->clamp_frag_color_in_shader) {
      f->NewFragClamp = ST_NEW_FS_STATE;
   } else {
      f->NewFragClamp = ST_NEW_RASTERIZER;
   }

   if (st->clamp_frag_depth_in_shader) {
      f->NewClipControl |= ST_NEW_VS_STATE | ST_NEW_GS_STATE |
                           ST_NEW_TES_STATE;

      f->NewDepthClamp = ST_NEW_FS_STATE | ST_NEW_VS_STATE |
                         ST_NEW_GS_STATE | ST_NEW_TES_STATE;
   } else {
      f->NewDepthClamp = ST_NEW_RASTERIZER;
   }

   if (st->lower_ucp)
      f->NewClipPlaneEnable = ST_NEW_VS_STATE | ST_NEW_GS_STATE;
   else
      f->NewClipPlaneEnable = ST_NEW_RASTERIZER;

   f->NewLineState = ST_NEW_RASTERIZER;
   f->NewPolygonState = ST_NEW_RASTERIZER;
   f->NewPolygonStipple = ST_NEW_POLY_STIPPLE;
   f->NewViewport = ST_NEW_VIEWPORT;
   f->NewNvConservativeRasterization = ST_NEW_RASTERIZER;
   f->NewNvConservativeRasterizationParams = ST_NEW_RASTERIZER;
   f->NewIntelConservativeRasterization = ST_NEW_RASTERIZER;

   if (st->emulate_gl_clamp)
      f->NewSamplersWithClamp = ST_NEW_SAMPLERS |
                                ST_NEW_VS_STATE | ST_NEW_TCS_STATE |
                                ST_NEW_TES_STATE | ST_NEW_GS_STATE |
                                ST_NEW_FS_STATE | ST_NEW_CS_STATE;
}

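/**
 * Create the st_context for a gl_context: query screen capabilities,
 * create the CSO context and helper modules, and set up GL limits,
 * extensions and shader-variant related flags.
 */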
static struct st_context *
st_create_context_priv(struct gl_context *ctx, struct pipe_context *pipe,
                       const struct st_config_options *options, bool no_error)
{
   struct pipe_screen *screen = pipe->screen;
   uint i;
   struct st_context *st = ST_CALLOC_STRUCT(st_context);

   st->options = *options;

   ctx->st = st;

   st->ctx = ctx;
   st->screen = screen;
   st->pipe = pipe;
   st->dirty = ST_ALL_STATES_MASK;

   st->can_bind_const_buffer_as_vertex =
      screen->get_param(screen, PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX);

   /* st/mesa always uploads zero-stride vertex attribs, and other user
    * vertex buffers are only possible with a compatibility profile.
    * So tell the u_vbuf module that user VBOs are not possible with the Core
    * profile, so that u_vbuf is bypassed completely if there is nothing else
    * to do.
    */
   unsigned cso_flags;
   switch (ctx->API) {
   case API_OPENGL_CORE:
      cso_flags = CSO_NO_USER_VERTEX_BUFFERS;
      break;
   case API_OPENGLES:
   case API_OPENGLES2:
      cso_flags = CSO_NO_64B_VERTEX_BUFFERS;
      break;
   default:
      cso_flags = 0;
      break;
   }

   st->cso_context = cso_create_context(pipe, cso_flags);

   st_init_atoms(st);
   st_init_clear(st);
   st_init_pbo_helpers(st);

   /* Choose texture target for glDrawPixels, glBitmap, renderbuffers */
   if (screen->get_param(screen, PIPE_CAP_NPOT_TEXTURES))
      st->internal_target = PIPE_TEXTURE_2D;
   else
      st->internal_target = PIPE_TEXTURE_RECT;

   /* Setup vertex element info for 'struct st_util_vertex'.
    */
   {
      STATIC_ASSERT(sizeof(struct st_util_vertex) == 9 * sizeof(float));

      memset(&st->util_velems, 0, sizeof(st->util_velems));
      st->util_velems.velems[0].src_offset = 0;
      st->util_velems.velems[0].vertex_buffer_index = 0;
      st->util_velems.velems[0].src_format = PIPE_FORMAT_R32G32B32_FLOAT;
      st->util_velems.velems[1].src_offset = 3 * sizeof(float);
      st->util_velems.velems[1].vertex_buffer_index = 0;
      st->util_velems.velems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
      st->util_velems.velems[2].src_offset = 7 * sizeof(float);
      st->util_velems.velems[2].vertex_buffer_index = 0;
      st->util_velems.velems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
   }

   /* Need these flags:
    */
   ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
   ctx->VertexProgram._MaintainTnlProgram = GL_TRUE;
   _mesa_reset_vertex_processing_mode(ctx);

   if (no_error)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;

   ctx->Const.PackedDriverUniformStorage =
      screen->get_param(screen, PIPE_CAP_PACKED_UNIFORMS);

   ctx->Const.BitmapUsesRed =
      screen->is_format_supported(screen, PIPE_FORMAT_R8_UNORM,
                                  PIPE_TEXTURE_2D, 0, 0,
                                  PIPE_BIND_SAMPLER_VIEW);

   st->has_stencil_export =
      screen->get_param(screen, PIPE_CAP_SHADER_STENCIL_EXPORT);
   st->has_etc1 = screen->is_format_supported(screen, PIPE_FORMAT_ETC1_RGB8,
                                              PIPE_TEXTURE_2D, 0, 0,
                                              PIPE_BIND_SAMPLER_VIEW);
   st->has_etc2 = screen->is_format_supported(screen, PIPE_FORMAT_ETC2_RGB8,
                                              PIPE_TEXTURE_2D, 0, 0,
                                              PIPE_BIND_SAMPLER_VIEW);
   st->transcode_etc = options->transcode_etc &&
                       screen->is_format_supported(screen, PIPE_FORMAT_DXT1_SRGBA,
                                                   PIPE_TEXTURE_2D, 0, 0,
                                                   PIPE_BIND_SAMPLER_VIEW);
   st->transcode_astc = options->transcode_astc &&
                        screen->is_format_supported(screen, PIPE_FORMAT_DXT5_SRGBA,
                                                    PIPE_TEXTURE_2D, 0, 0,
                                                    PIPE_BIND_SAMPLER_VIEW);
   st->has_astc_2d_ldr =
      screen->is_format_supported(screen, PIPE_FORMAT_ASTC_4x4_SRGB,
                                  PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
   st->has_astc_5x5_ldr =
      screen->is_format_supported(screen, PIPE_FORMAT_ASTC_5x5_SRGB,
                                  PIPE_TEXTURE_2D, 0, 0, PIPE_BIND_SAMPLER_VIEW);
   st->prefer_blit_based_texture_transfer = screen->get_param(screen,
                              PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER);
   st->force_persample_in_shader =
      screen->get_param(screen, PIPE_CAP_SAMPLE_SHADING) &&
      !screen->get_param(screen, PIPE_CAP_FORCE_PERSAMPLE_INTERP);
   st->has_shareable_shaders = screen->get_param(screen,
                                                 PIPE_CAP_SHAREABLE_SHADERS);
   st->needs_texcoord_semantic =
      screen->get_param(screen, PIPE_CAP_TGSI_TEXCOORD);
   st->apply_texture_swizzle_to_border_color =
      !!(screen->get_param(screen, PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
         (PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50 |
          PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600));
   st->emulate_gl_clamp =
      !screen->get_param(screen, PIPE_CAP_GL_CLAMP);
   st->texture_buffer_sampler =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_SAMPLER);
   st->has_time_elapsed =
      screen->get_param(screen, PIPE_CAP_QUERY_TIME_ELAPSED);
   st->has_half_float_packing =
      screen->get_param(screen, PIPE_CAP_TGSI_PACK_HALF_FLOAT);
   st->has_multi_draw_indirect =
      screen->get_param(screen, PIPE_CAP_MULTI_DRAW_INDIRECT);
   st->has_single_pipe_stat =
      screen->get_param(screen, PIPE_CAP_QUERY_PIPELINE_STATISTICS_SINGLE);
   st->has_indep_blend_func =
      screen->get_param(screen, PIPE_CAP_INDEP_BLEND_FUNC);
   st->needs_rgb_dst_alpha_override =
      screen->get_param(screen, PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND);
   st->lower_flatshade =
      !screen->get_param(screen, PIPE_CAP_FLATSHADE);
   st->lower_alpha_test =
      !screen->get_param(screen, PIPE_CAP_ALPHA_TEST);
   st->lower_point_size =
      !screen->get_param(screen, PIPE_CAP_POINT_SIZE_FIXED);
   st->lower_two_sided_color =
      !screen->get_param(screen, PIPE_CAP_TWO_SIDED_COLOR);
   st->lower_ucp =
      !screen->get_param(screen, PIPE_CAP_CLIP_PLANES);
   st->prefer_real_buffer_in_constbuf0 =
      screen->get_param(screen, PIPE_CAP_PREFER_REAL_BUFFER_IN_CONSTBUF0);
   st->has_conditional_render =
      screen->get_param(screen, PIPE_CAP_CONDITIONAL_RENDER);
   st->lower_texcoord_replace =
      !screen->get_param(screen, PIPE_CAP_POINT_SPRITE);
   st->lower_rect_tex =
      !screen->get_param(screen, PIPE_CAP_TEXRECT);
   st->allow_st_finalize_nir_twice = screen->finalize_nir != NULL;

   st->has_hw_atomics =
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS)
      ? true : false;

   util_throttle_init(&st->throttle,
                      screen->get_param(screen,
                                        PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET));

   /* GL limits and extensions */
   st_init_limits(screen, &ctx->Const, &ctx->Extensions);
   st_init_extensions(screen, &ctx->Const,
                      &ctx->Extensions, &st->options, ctx->API);

   if (st_have_perfmon(st)) {
      ctx->Extensions.AMD_performance_monitor = GL_TRUE;
   }

   if (st_have_perfquery(st)) {
      ctx->Extensions.INTEL_performance_query = GL_TRUE;
   }

   /* Enable shader-based fallbacks for ARB_color_buffer_float if needed. */
   if (screen->get_param(screen, PIPE_CAP_VERTEX_COLOR_UNCLAMPED)) {
      if (!screen->get_param(screen, PIPE_CAP_VERTEX_COLOR_CLAMPED)) {
         st->clamp_vert_color_in_shader = GL_TRUE;
      }

      if (!screen->get_param(screen, PIPE_CAP_FRAGMENT_COLOR_CLAMPED)) {
         st->clamp_frag_color_in_shader = GL_TRUE;
      }

      /* For drivers which cannot do color clamping, it's better to just
       * disable ARB_color_buffer_float in the core profile, because
       * the clamping is deprecated there anyway. */
      if (ctx->API == API_OPENGL_CORE &&
          (st->clamp_frag_color_in_shader || st->clamp_vert_color_in_shader)) {
         st->clamp_vert_color_in_shader = GL_FALSE;
         st->clamp_frag_color_in_shader = GL_FALSE;
         ctx->Extensions.ARB_color_buffer_float = GL_FALSE;
      }
   }

   if (screen->get_param(screen, PIPE_CAP_DEPTH_CLIP_DISABLE) == 2)
      st->clamp_frag_depth_in_shader = true;

   /* called after _mesa_create_context/_mesa_init_point, fix default user
    * settable max point size up
    */
   ctx->Point.MaxSize = MAX2(ctx->Const.MaxPointSize,
                             ctx->Const.MaxPointSizeAA);

   ctx->Const.NoClippingOnCopyTex = screen->get_param(screen,
                                                      PIPE_CAP_NO_CLIP_ON_COPY_TEX);

   /* For vertex shaders, make sure not to emit saturate when SM 3.0
    * is not supported
    */
   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoSat =
      !screen->get_param(screen, PIPE_CAP_VERTEX_SHADER_SATURATE);

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].PositionAlwaysInvariant = options->vs_position_always_invariant;

   ctx->Const.ShaderCompilerOptions[MESA_SHADER_TESS_EVAL].PositionAlwaysPrecise = options->vs_position_always_precise;

   enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
      screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
                               PIPE_SHADER_CAP_PREFERRED_IR);
   ctx->Const.UseNIRGLSLLinker = preferred_ir == PIPE_SHADER_IR_NIR;

   if (ctx->Const.GLSLVersion < 400) {
      for (i = 0; i < MESA_SHADER_STAGES; i++)
         ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectSampler = true;
   }

   /* Set which shader types can be compiled at link time. */
   st->shader_has_one_variant[MESA_SHADER_VERTEX] =
      st->has_shareable_shaders &&
      !st->clamp_frag_depth_in_shader &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;

   st->shader_has_one_variant[MESA_SHADER_FRAGMENT] =
      st->has_shareable_shaders &&
      !st->lower_flatshade &&
      !st->lower_alpha_test &&
      !st->clamp_frag_color_in_shader &&
      !st->clamp_frag_depth_in_shader &&
      !st->force_persample_in_shader &&
      !st->lower_two_sided_color &&
      !st->lower_texcoord_replace;

   st->shader_has_one_variant[MESA_SHADER_TESS_CTRL] = st->has_shareable_shaders;
   st->shader_has_one_variant[MESA_SHADER_TESS_EVAL] =
      st->has_shareable_shaders &&
      !st->clamp_frag_depth_in_shader &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;

   st->shader_has_one_variant[MESA_SHADER_GEOMETRY] =
      st->has_shareable_shaders &&
      !st->clamp_frag_depth_in_shader &&
      !st->clamp_vert_color_in_shader &&
      !st->lower_point_size &&
      !st->lower_ucp;
   st->shader_has_one_variant[MESA_SHADER_COMPUTE] = st->has_shareable_shaders;

   util_cpu_detect();

   if (util_get_cpu_caps()->num_L3_caches == 1 ||
       !st->pipe->set_context_param)
      st->pin_thread_counter = ST_L3_PINNING_DISABLED;

   st->bitmap.cache.empty = true;

   if (ctx->Const.ForceGLNamesReuse && ctx->Shared->RefCount == 1) {
      _mesa_HashEnableNameReuse(ctx->Shared->TexObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->ShaderObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->BufferObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->SamplerObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->FrameBuffers);
      _mesa_HashEnableNameReuse(ctx->Shared->RenderBuffers);
      _mesa_HashEnableNameReuse(ctx->Shared->MemoryObjects);
      _mesa_HashEnableNameReuse(ctx->Shared->SemaphoreObjects);
   }
   /* SPECviewperf13/sw-04 crashes since a56849ddda6 if Mesa is built with
    * -O3 on gcc 7.5, which doesn't happen with ForceGLNamesReuse, which is
    * the default setting for SPECviewperf because it simulates the glGen
    * behavior of closed source drivers.
    */
   if (ctx->Const.ForceGLNamesReuse)
      _mesa_HashEnableNameReuse(ctx->Query.QueryObjects);

   _mesa_override_extensions(ctx);
   _mesa_compute_version(ctx);

   if (ctx->Version == 0) {
      /* This can happen when a core profile was requested, but the driver
       * does not support some features of GL 3.1 or later.
       */
      st_destroy_context_priv(st, false);
      return NULL;
   }

   /* This must be done after extensions are initialized to enable persistent
    * mappings immediately.
    */
   _vbo_CreateContext(ctx, true);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);
   st_init_driver_flags(st);

   /* Initialize context's winsys buffers list */
   list_inithead(&st->winsys_buffers);

   list_inithead(&st->zombie_sampler_views.list.node);
   simple_mtx_init(&st->zombie_sampler_views.mutex, mtx_plain);
   list_inithead(&st->zombie_shaders.list.node);
   simple_mtx_init(&st->zombie_shaders.mutex, mtx_plain);

   return st;
}

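/**
 * Called via ctx->Driver.EmitStringMarker()
 */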
static void
st_emit_string_marker(struct gl_context *ctx, const GLchar *string, GLsizei len)
{
   struct st_context *st = ctx->st;
   st->pipe->emit_string_marker(st->pipe, string, len);
}

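/**
 * Called via ctx->Driver.SetBackgroundContext()
 */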
static void
st_set_background_context(struct gl_context *ctx,
                          struct util_queue_monitoring *queue_info)
{
   struct st_context *st = ctx->st;
   struct st_manager *smapi =
      (struct st_manager *) st->iface.st_context_private;

   assert(smapi->set_background_context);
   smapi->set_background_context(&st->iface, queue_info);
}

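/**
 * Called via ctx->Driver.GetDeviceUuid()
 */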
static void
st_get_device_uuid(struct gl_context *ctx, char *uuid)
{
   struct pipe_screen *screen = st_context(ctx)->screen;

   assert(GL_UUID_SIZE_EXT >= PIPE_UUID_SIZE);
   memset(uuid, 0, GL_UUID_SIZE_EXT);
   screen->get_device_uuid(screen, uuid);
}

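/**
 * Called via ctx->Driver.GetDriverUuid()
 */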
static void
st_get_driver_uuid(struct gl_context *ctx, char *uuid)
{
   struct pipe_screen *screen = st_context(ctx)->screen;

   assert(GL_UUID_SIZE_EXT >= PIPE_UUID_SIZE);
   memset(uuid, 0, GL_UUID_SIZE_EXT);
   screen->get_driver_uuid(screen, uuid);
}


static void
st_pin_driver_to_l3_cache(struct gl_context *ctx, unsigned L3_cache)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;

   pipe->set_context_param(pipe, PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
                           L3_cache);
}

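/**
 * Fill in the dd_function_table with the state tracker's implementations
 * of core Mesa's driver hooks.
 */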
static void
st_init_driver_functions(struct pipe_screen *screen,
                         struct dd_function_table *functions,
                         bool has_egl_image_validate)
{
   _mesa_init_sampler_object_functions(functions);

   st_init_draw_functions(screen, functions);
   st_init_blit_functions(functions);
   st_init_bufferobject_functions(screen, functions);
   st_init_clear_functions(functions);
   st_init_bitmap_functions(functions);
   st_init_copy_image_functions(functions);
   st_init_drawpixels_functions(functions);
   st_init_rasterpos_functions(functions);

   st_init_drawtex_functions(functions);

   st_init_eglimage_functions(functions, has_egl_image_validate);

   st_init_fbo_functions(functions);
   st_init_feedback_functions(functions);
   st_init_memoryobject_functions(functions);
   st_init_msaa_functions(functions);
   st_init_perfmon_functions(functions);
   st_init_perfquery_functions(functions);
   st_init_program_functions(functions);
   st_init_query_functions(functions);
   st_init_cond_render_functions(functions);
   st_init_readpixels_functions(functions);
   st_init_semaphoreobject_functions(functions);
   st_init_texture_functions(functions);
   st_init_texture_barrier_functions(functions);
   st_init_flush_functions(screen, functions);
   st_init_string_functions(functions);
   st_init_viewport_functions(functions);
   st_init_compute_functions(functions);

   st_init_xformfb_functions(functions);
   st_init_syncobj_functions(functions);

   st_init_vdpau_functions(functions);

   if (screen->get_param(screen, PIPE_CAP_STRING_MARKER))
      functions->EmitStringMarker = st_emit_string_marker;

   functions->Enable = st_Enable;
   functions->UpdateState = st_invalidate_state;
   functions->QueryMemoryInfo = st_query_memory_info;
   functions->SetBackgroundContext = st_set_background_context;
   functions->GetDriverUuid = st_get_driver_uuid;
   functions->GetDeviceUuid = st_get_device_uuid;

   /* GL_ARB_get_program_binary */
   functions->GetProgramBinaryDriverSHA1 = st_get_program_binary_driver_sha1;

   enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
      screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
                               PIPE_SHADER_CAP_PREFERRED_IR);
   if (preferred_ir == PIPE_SHADER_IR_NIR) {
      functions->ShaderCacheSerializeDriverBlob = st_serialise_nir_program;
      functions->ProgramBinarySerializeDriverBlob =
         st_serialise_nir_program_binary;
      functions->ProgramBinaryDeserializeDriverBlob =
         st_deserialise_nir_program;
   } else {
      functions->ShaderCacheSerializeDriverBlob = st_serialise_tgsi_program;
      functions->ProgramBinarySerializeDriverBlob =
         st_serialise_tgsi_program_binary;
      functions->ProgramBinaryDeserializeDriverBlob =
         st_deserialise_tgsi_program;
   }
}

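/**
 * Create a new st_context (and the gl_context it wraps) for the given
 * gallium pipe context.  Returns NULL on failure.
 */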
struct st_context *
st_create_context(gl_api api, struct pipe_context *pipe,
                  const struct gl_config *visual,
                  struct st_context *share,
                  const struct st_config_options *options,
                  bool no_error, bool has_egl_image_validate)
{
   struct gl_context *ctx;
   struct gl_context *shareCtx = share ? share->ctx : NULL;
   struct dd_function_table funcs;
   struct st_context *st;

   util_cpu_detect();

   memset(&funcs, 0, sizeof(funcs));
   st_init_driver_functions(pipe->screen, &funcs, has_egl_image_validate);

   if (pipe->set_context_param)
      funcs.PinDriverToL3Cache = st_pin_driver_to_l3_cache;

   /* gl_context must be 16-byte aligned due to the alignment on GLmatrix. */
   ctx = align_malloc(sizeof(struct gl_context), 16);
   if (!ctx)
      return NULL;
   memset(ctx, 0, sizeof(*ctx));

   if (!_mesa_initialize_context(ctx, api, visual, shareCtx, &funcs)) {
      align_free(ctx);
      return NULL;
   }

   st_debug_init();

   if (pipe->screen->get_disk_shader_cache)
      ctx->Cache = pipe->screen->get_disk_shader_cache(pipe->screen);

   /* XXX: need a capability bit in gallium to query if the pipe
    * driver prefers DP4 or MUL/MAD for vertex transformation.
    */
   if (debug_get_option_mesa_mvp_dp4())
      ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = GL_TRUE;

   st = st_create_context_priv(ctx, pipe, options, no_error);
   if (!st) {
      _mesa_free_context_data(ctx, true);
      align_free(ctx);
   }

   return st;
}


/**
 * When we destroy a context, we must examine all texture objects to
 * find/release any sampler views created by that context.
 *
 * This callback is called per-texture object. It releases all the
 * texture's sampler views which belong to the context.
 */
static void
destroy_tex_sampler_cb(void *data, void *userData)
{
   struct gl_texture_object *texObj = (struct gl_texture_object *) data;
   struct st_context *st = (struct st_context *) userData;

   st_texture_release_context_sampler_view(st, st_texture_object(texObj));
}

static void
destroy_framebuffer_attachment_sampler_cb(void *data, void *userData)
{
   struct gl_framebuffer *glfb = (struct gl_framebuffer *) data;
   struct st_context *st = (struct st_context *) userData;

   for (unsigned i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer_attachment *att = &glfb->Attachment[i];
      if (att->Texture) {
         st_texture_release_context_sampler_view(st, st_texture_object(att->Texture));
      }
   }
}

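/**
 * Destroy an st_context along with its gl_context, releasing all sampler
 * views, programs and winsys buffers that belong to it.
 */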
void
st_destroy_context(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;
   struct st_framebuffer *stfb, *next;
   struct gl_framebuffer *save_drawbuffer;
   struct gl_framebuffer *save_readbuffer;

   /* Save the current context and draw/read buffers */
   GET_CURRENT_CONTEXT(save_ctx);
   if (save_ctx) {
      save_drawbuffer = save_ctx->WinSysDrawBuffer;
      save_readbuffer = save_ctx->WinSysReadBuffer;
   } else {
      save_drawbuffer = save_readbuffer = NULL;
   }

   /*
    * We need to bind the context we're deleting so that
    * _mesa_reference_texobj_() uses this context when deleting textures.
    * Similarly for framebuffer objects, etc.
    */
   _mesa_make_current(ctx, NULL, NULL);

   /* This must be called first so that glthread has a chance to finish */
   _mesa_glthread_destroy(ctx);

   _mesa_HashWalk(ctx->Shared->TexObjects, destroy_tex_sampler_cb, st);

   /* For the fallback textures, free any sampler views belonging to this
    * context.
    */
   for (unsigned i = 0; i < NUM_TEXTURE_TARGETS; i++) {
      struct st_texture_object *stObj =
         st_texture_object(ctx->Shared->FallbackTex[i]);
      if (stObj) {
         st_texture_release_context_sampler_view(st, stObj);
      }
   }

   st_context_free_zombie_objects(st);

   simple_mtx_destroy(&st->zombie_sampler_views.mutex);
   simple_mtx_destroy(&st->zombie_shaders.mutex);

   st_release_program(st, &st->fp);
   st_release_program(st, &st->gp);
   st_release_program(st, &st->vp);
   st_release_program(st, &st->tcp);
   st_release_program(st, &st->tep);
   st_release_program(st, &st->cp);

   /* release framebuffer in the winsys buffers list */
   LIST_FOR_EACH_ENTRY_SAFE_REV(stfb, next, &st->winsys_buffers, head) {
      st_framebuffer_reference(&stfb, NULL);
   }

   _mesa_HashWalk(ctx->Shared->FrameBuffers,
                  destroy_framebuffer_attachment_sampler_cb, st);

   pipe_sampler_view_reference(&st->pixel_xfer.pixelmap_sampler_view, NULL);
   pipe_resource_reference(&st->pixel_xfer.pixelmap_texture, NULL);

   _vbo_DestroyContext(ctx);

   st_destroy_program_variants(st);

   /* Do not release debug_output yet because it might be in use by other
    * threads.  These threads will be terminated by _mesa_free_context_data
    * and st_destroy_context_priv.
    */
   _mesa_free_context_data(ctx, false);

   /* This will free the st_context too, so 'st' must not be accessed
    * afterwards. */
   st_destroy_context_priv(st, true);
   st = NULL;

   _mesa_destroy_debug_output(ctx);

   align_free(ctx);

   if (save_ctx == ctx) {
      /* unbind the context we just deleted */
      _mesa_make_current(NULL, NULL, NULL);
   } else {
      /* Restore the current context and draw/read buffers (may be NULL) */
      _mesa_make_current(save_ctx, save_drawbuffer, save_readbuffer);
   }
}

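/**
 * Return the NIR compiler options for the given shader stage, falling back
 * to the nir-to-tgsi options when the driver does not provide its own.
 */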
const struct nir_shader_compiler_options *
st_get_nir_compiler_options(struct st_context *st, gl_shader_stage stage)
{
   const struct nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[stage].NirOptions;

   if (options) {
      return options;
   } else {
      return nir_to_tgsi_get_compiler_options(st->screen,
                                              PIPE_SHADER_IR_NIR,
                                              pipe_shader_type_from_mesa(stage));
   }
}