1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 /** \file glthread_marshal.h
25  *
26  * Declarations of functions related to marshalling GL calls from a client
27  * thread to a server thread.
28  */
29 
30 #ifndef MARSHAL_H
31 #define MARSHAL_H
32 
33 #include "main/glthread.h"
34 #include "main/context.h"
35 #include "main/macros.h"
36 #include "marshal_generated.h"
37 
struct marshal_cmd_base
{
   /**
    * Type of command.  See enum marshal_dispatch_cmd_id.
    */
   uint16_t cmd_id;

   /**
    * Number of uint64_t elements used by the command.
    */
   uint16_t cmd_size;
};

/* Unmarshal callback for one command; the uint32_t return is presumably the
 * consumed size in uint64_t elements (matching cmd_size) — confirm against
 * the generated unmarshal functions. */
typedef uint32_t (*_mesa_unmarshal_func)(struct gl_context *ctx, const void *cmd, const uint64_t *last);
/* Dispatch table indexed by cmd_id. */
extern const _mesa_unmarshal_func _mesa_unmarshal_dispatch[NUM_DISPATCH_CMD];
53 
54 static inline void *
_mesa_glthread_allocate_command(struct gl_context * ctx,uint16_t cmd_id,unsigned size)55 _mesa_glthread_allocate_command(struct gl_context *ctx,
56                                 uint16_t cmd_id,
57                                 unsigned size)
58 {
59    struct glthread_state *glthread = &ctx->GLThread;
60    const unsigned num_elements = align(size, 8) / 8;
61 
62    assert (num_elements <= MARSHAL_MAX_CMD_SIZE / 8);
63 
64    if (unlikely(glthread->used + num_elements > MARSHAL_MAX_CMD_SIZE / 8))
65       _mesa_glthread_flush_batch(ctx);
66 
67    struct glthread_batch *next = glthread->next_batch;
68    struct marshal_cmd_base *cmd_base =
69       (struct marshal_cmd_base *)&next->buffer[glthread->used];
70    glthread->used += num_elements;
71    cmd_base->cmd_id = cmd_id;
72    cmd_base->cmd_size = num_elements;
73    return cmd_base;
74 }
75 
76 static inline bool
_mesa_glthread_has_no_pack_buffer(const struct gl_context * ctx)77 _mesa_glthread_has_no_pack_buffer(const struct gl_context *ctx)
78 {
79    return ctx->GLThread.CurrentPixelPackBufferName == 0;
80 }
81 
82 static inline bool
_mesa_glthread_has_no_unpack_buffer(const struct gl_context * ctx)83 _mesa_glthread_has_no_unpack_buffer(const struct gl_context *ctx)
84 {
85    return ctx->GLThread.CurrentPixelUnpackBufferName == 0;
86 }
87 
88 /**
89  * Instead of conditionally handling marshaling immediate index data in draw
90  * calls (deprecated and removed in GL core), we just disable threading.
91  */
92 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indices(const struct gl_context * ctx)93 _mesa_glthread_has_non_vbo_vertices_or_indices(const struct gl_context *ctx)
94 {
95    const struct glthread_state *glthread = &ctx->GLThread;
96    struct glthread_vao *vao = glthread->CurrentVAO;
97 
98    return ctx->API != API_OPENGL_CORE &&
99           (vao->CurrentElementBufferName == 0 ||
100            (vao->UserPointerMask & vao->BufferEnabled));
101 }
102 
103 static inline bool
_mesa_glthread_has_non_vbo_vertices(const struct gl_context * ctx)104 _mesa_glthread_has_non_vbo_vertices(const struct gl_context *ctx)
105 {
106    const struct glthread_state *glthread = &ctx->GLThread;
107    const struct glthread_vao *vao = glthread->CurrentVAO;
108 
109    return ctx->API != API_OPENGL_CORE &&
110           (vao->UserPointerMask & vao->BufferEnabled);
111 }
112 
113 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indirect(const struct gl_context * ctx)114 _mesa_glthread_has_non_vbo_vertices_or_indirect(const struct gl_context *ctx)
115 {
116    const struct glthread_state *glthread = &ctx->GLThread;
117    const struct glthread_vao *vao = glthread->CurrentVAO;
118 
119    return ctx->API != API_OPENGL_CORE &&
120           (glthread->CurrentDrawIndirectBufferName == 0 ||
121            (vao->UserPointerMask & vao->BufferEnabled));
122 }
123 
124 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indices_or_indirect(const struct gl_context * ctx)125 _mesa_glthread_has_non_vbo_vertices_or_indices_or_indirect(const struct gl_context *ctx)
126 {
127    const struct glthread_state *glthread = &ctx->GLThread;
128    struct glthread_vao *vao = glthread->CurrentVAO;
129 
130    return ctx->API != API_OPENGL_CORE &&
131           (glthread->CurrentDrawIndirectBufferName == 0 ||
132            vao->CurrentElementBufferName == 0 ||
133            (vao->UserPointerMask & vao->BufferEnabled));
134 }
135 
136 
/* Set up the marshal/unmarshal dispatch tables for this context.
 * Returns false on failure (presumably allocation — confirm in definition). */
bool
_mesa_create_marshal_tables(struct gl_context *ctx);
139 
140 static inline unsigned
_mesa_buffer_enum_to_count(GLenum buffer)141 _mesa_buffer_enum_to_count(GLenum buffer)
142 {
143    switch (buffer) {
144    case GL_COLOR:
145       return 4;
146    case GL_DEPTH_STENCIL:
147       return 2;
148    case GL_STENCIL:
149    case GL_DEPTH:
150       return 1;
151    default:
152       return 0;
153    }
154 }
155 
/* Component count for a glTexParameter* pname; 0 if unrecognized. */
static inline unsigned
_mesa_tex_param_enum_to_count(GLenum pname)
{
   switch (pname) {
   /* Scalar parameters. */
   case GL_TEXTURE_MIN_FILTER:
   case GL_TEXTURE_MAG_FILTER:
   case GL_TEXTURE_WRAP_S:
   case GL_TEXTURE_WRAP_T:
   case GL_TEXTURE_WRAP_R:
   case GL_TEXTURE_BASE_LEVEL:
   case GL_TEXTURE_MAX_LEVEL:
   case GL_GENERATE_MIPMAP_SGIS:
   case GL_TEXTURE_COMPARE_MODE_ARB:
   case GL_TEXTURE_COMPARE_FUNC_ARB:
   case GL_DEPTH_TEXTURE_MODE_ARB:
   case GL_DEPTH_STENCIL_TEXTURE_MODE:
   case GL_TEXTURE_SRGB_DECODE_EXT:
   case GL_TEXTURE_REDUCTION_MODE_EXT:
   case GL_TEXTURE_CUBE_MAP_SEAMLESS:
   case GL_TEXTURE_SWIZZLE_R:
   case GL_TEXTURE_SWIZZLE_G:
   case GL_TEXTURE_SWIZZLE_B:
   case GL_TEXTURE_SWIZZLE_A:
   case GL_TEXTURE_MIN_LOD:
   case GL_TEXTURE_MAX_LOD:
   case GL_TEXTURE_PRIORITY:
   case GL_TEXTURE_MAX_ANISOTROPY_EXT:
   case GL_TEXTURE_LOD_BIAS:
   case GL_TEXTURE_TILING_EXT:
      return 1;
   /* 4-component vector parameters. */
   case GL_TEXTURE_CROP_RECT_OES:
   case GL_TEXTURE_SWIZZLE_RGBA:
   case GL_TEXTURE_BORDER_COLOR:
      return 4;
   default:
      return 0;
   }
}
194 
195 static inline unsigned
_mesa_fog_enum_to_count(GLenum pname)196 _mesa_fog_enum_to_count(GLenum pname)
197 {
198    switch (pname) {
199    case GL_FOG_MODE:
200    case GL_FOG_DENSITY:
201    case GL_FOG_START:
202    case GL_FOG_END:
203    case GL_FOG_INDEX:
204    case GL_FOG_COORDINATE_SOURCE_EXT:
205    case GL_FOG_DISTANCE_MODE_NV:
206       return 1;
207    case GL_FOG_COLOR:
208       return 4;
209    default:
210       return 0;
211    }
212 }
213 
214 static inline unsigned
_mesa_light_enum_to_count(GLenum pname)215 _mesa_light_enum_to_count(GLenum pname)
216 {
217    switch (pname) {
218    case GL_AMBIENT:
219    case GL_DIFFUSE:
220    case GL_SPECULAR:
221    case GL_POSITION:
222       return 4;
223    case GL_SPOT_DIRECTION:
224       return 3;
225    case GL_SPOT_EXPONENT:
226    case GL_SPOT_CUTOFF:
227    case GL_CONSTANT_ATTENUATION:
228    case GL_LINEAR_ATTENUATION:
229    case GL_QUADRATIC_ATTENUATION:
230       return 1;
231    default:
232       return 0;
233    }
234 }
235 
236 static inline unsigned
_mesa_light_model_enum_to_count(GLenum pname)237 _mesa_light_model_enum_to_count(GLenum pname)
238 {
239    switch (pname) {
240    case GL_LIGHT_MODEL_AMBIENT:
241       return 4;
242    case GL_LIGHT_MODEL_LOCAL_VIEWER:
243    case GL_LIGHT_MODEL_TWO_SIDE:
244    case GL_LIGHT_MODEL_COLOR_CONTROL:
245       return 1;
246    default:
247       return 0;
248    }
249 }
250 
/* Component count for a glTexEnv* pname; 0 if unrecognized. */
static inline unsigned
_mesa_texenv_enum_to_count(GLenum pname)
{
   switch (pname) {
   /* Scalar parameters (mode/source/operand/scale selectors). */
   case GL_TEXTURE_ENV_MODE:
   case GL_COMBINE_RGB:
   case GL_COMBINE_ALPHA:
   case GL_SOURCE0_RGB:
   case GL_SOURCE1_RGB:
   case GL_SOURCE2_RGB:
   case GL_SOURCE3_RGB_NV:
   case GL_SOURCE0_ALPHA:
   case GL_SOURCE1_ALPHA:
   case GL_SOURCE2_ALPHA:
   case GL_SOURCE3_ALPHA_NV:
   case GL_OPERAND0_RGB:
   case GL_OPERAND1_RGB:
   case GL_OPERAND2_RGB:
   case GL_OPERAND3_RGB_NV:
   case GL_OPERAND0_ALPHA:
   case GL_OPERAND1_ALPHA:
   case GL_OPERAND2_ALPHA:
   case GL_OPERAND3_ALPHA_NV:
   case GL_RGB_SCALE:
   case GL_ALPHA_SCALE:
   case GL_TEXTURE_LOD_BIAS_EXT:
   case GL_COORD_REPLACE:
      return 1;
   /* RGBA color. */
   case GL_TEXTURE_ENV_COLOR:
      return 4;
   default:
      return 0;
   }
}
285 
286 static inline unsigned
_mesa_texgen_enum_to_count(GLenum pname)287 _mesa_texgen_enum_to_count(GLenum pname)
288 {
289    switch (pname) {
290    case GL_TEXTURE_GEN_MODE:
291       return 1;
292    case GL_OBJECT_PLANE:
293    case GL_EYE_PLANE:
294       return 4;
295    default:
296       return 0;
297    }
298 }
299 
300 static inline unsigned
_mesa_material_enum_to_count(GLenum pname)301 _mesa_material_enum_to_count(GLenum pname)
302 {
303    switch (pname) {
304    case GL_EMISSION:
305    case GL_AMBIENT:
306    case GL_DIFFUSE:
307    case GL_SPECULAR:
308    case GL_AMBIENT_AND_DIFFUSE:
309       return 4;
310    case GL_COLOR_INDEXES:
311       return 3;
312    case GL_SHININESS:
313       return 1;
314    default:
315       return 0;
316    }
317 }
318 
319 static inline unsigned
_mesa_point_param_enum_to_count(GLenum pname)320 _mesa_point_param_enum_to_count(GLenum pname)
321 {
322    switch (pname) {
323    case GL_DISTANCE_ATTENUATION_EXT:
324       return 3;
325    case GL_POINT_SIZE_MIN_EXT:
326    case GL_POINT_SIZE_MAX_EXT:
327    case GL_POINT_FADE_THRESHOLD_SIZE_EXT:
328    case GL_POINT_SPRITE_COORD_ORIGIN:
329       return 1;
330    default:
331       return 0;
332    }
333 }
334 
335 static inline unsigned
_mesa_calllists_enum_to_count(GLenum type)336 _mesa_calllists_enum_to_count(GLenum type)
337 {
338    switch (type) {
339    case GL_BYTE:
340    case GL_UNSIGNED_BYTE:
341       return 1;
342    case GL_SHORT:
343    case GL_UNSIGNED_SHORT:
344    case GL_2_BYTES:
345       return 2;
346    case GL_3_BYTES:
347       return 3;
348    case GL_INT:
349    case GL_UNSIGNED_INT:
350    case GL_FLOAT:
351    case GL_4_BYTES:
352       return 4;
353    default:
354       return 0;
355    }
356 }
357 
358 static inline unsigned
_mesa_patch_param_enum_to_count(GLenum pname)359 _mesa_patch_param_enum_to_count(GLenum pname)
360 {
361    switch (pname) {
362    case GL_PATCH_DEFAULT_OUTER_LEVEL:
363       return 4;
364    case GL_PATCH_DEFAULT_INNER_LEVEL:
365       return 2;
366    default:
367       return 0;
368    }
369 }
370 
371 static inline unsigned
_mesa_memobj_enum_to_count(GLenum pname)372 _mesa_memobj_enum_to_count(GLenum pname)
373 {
374    switch (pname) {
375    case GL_DEDICATED_MEMORY_OBJECT_EXT:
376       return 1;
377    default:
378       return 0;
379    }
380 }
381 
382 static inline unsigned
_mesa_semaphore_enum_to_count(GLenum pname)383 _mesa_semaphore_enum_to_count(GLenum pname)
384 {
385    switch (pname) {
386    /* EXT_semaphore and EXT_semaphore_fd define no parameters */
387    default:
388       return 0;
389    }
390 }
391 
/* Map a client-state array enum to the vertex attribute it controls.
 * GL_TEXTURE_COORD_ARRAY resolves against the tracked client active texture.
 * Returns VERT_ATTRIB_MAX for unrecognized enums. */
static inline gl_vert_attrib
_mesa_array_to_attrib(struct gl_context *ctx, GLenum array)
{
   switch (array) {
   case GL_VERTEX_ARRAY:
      return VERT_ATTRIB_POS;
   case GL_NORMAL_ARRAY:
      return VERT_ATTRIB_NORMAL;
   case GL_COLOR_ARRAY:
      return VERT_ATTRIB_COLOR0;
   case GL_INDEX_ARRAY:
      return VERT_ATTRIB_COLOR_INDEX;
   case GL_TEXTURE_COORD_ARRAY:
      return VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture);
   case GL_EDGE_FLAG_ARRAY:
      return VERT_ATTRIB_EDGEFLAG;
   case GL_FOG_COORDINATE_ARRAY:
      return VERT_ATTRIB_FOG;
   case GL_SECONDARY_COLOR_ARRAY:
      return VERT_ATTRIB_COLOR1;
   case GL_POINT_SIZE_ARRAY_OES:
      return VERT_ATTRIB_POINT_SIZE;
   case GL_PRIMITIVE_RESTART_NV:
      return VERT_ATTRIB_PRIMITIVE_RESTART_NV;
   default:
      /* Allow GL_TEXTURE0..GL_TEXTURE7 to select a specific texcoord array. */
      if (array >= GL_TEXTURE0 && array <= GL_TEXTURE7)
         return VERT_ATTRIB_TEX(array - GL_TEXTURE0);
      return VERT_ATTRIB_MAX;
   }
}
422 
423 static inline gl_matrix_index
_mesa_get_matrix_index(struct gl_context * ctx,GLenum mode)424 _mesa_get_matrix_index(struct gl_context *ctx, GLenum mode)
425 {
426    if (mode == GL_MODELVIEW || mode == GL_PROJECTION)
427       return M_MODELVIEW + (mode - GL_MODELVIEW);
428 
429    if (mode == GL_TEXTURE)
430       return M_TEXTURE0 + ctx->GLThread.ActiveTexture;
431 
432    if (mode >= GL_TEXTURE0 && mode <= GL_TEXTURE0 + MAX_TEXTURE_UNITS - 1)
433       return M_TEXTURE0 + (mode - GL_TEXTURE0);
434 
435    if (mode >= GL_MATRIX0_ARB && mode <= GL_MATRIX0_ARB + MAX_PROGRAM_MATRICES - 1)
436       return M_PROGRAM0 + (mode - GL_MATRIX0_ARB);
437 
438    return M_DUMMY;
439 }
440 
441 static inline void
_mesa_glthread_Enable(struct gl_context * ctx,GLenum cap)442 _mesa_glthread_Enable(struct gl_context *ctx, GLenum cap)
443 {
444    if (ctx->GLThread.ListMode == GL_COMPILE)
445       return;
446 
447    switch (cap) {
448    case GL_PRIMITIVE_RESTART:
449    case GL_PRIMITIVE_RESTART_FIXED_INDEX:
450       _mesa_glthread_set_prim_restart(ctx, cap, true);
451       break;
452    case GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB:
453       _mesa_glthread_destroy(ctx, "Enable(DEBUG_OUTPUT_SYNCHRONOUS)");
454       break;
455    case GL_DEPTH_TEST:
456       ctx->GLThread.DepthTest = true;
457       break;
458    case GL_CULL_FACE:
459       ctx->GLThread.CullFace = true;
460       break;
461    }
462 }
463 
464 static inline void
_mesa_glthread_Disable(struct gl_context * ctx,GLenum cap)465 _mesa_glthread_Disable(struct gl_context *ctx, GLenum cap)
466 {
467    if (ctx->GLThread.ListMode == GL_COMPILE)
468       return;
469 
470    switch (cap) {
471    case GL_PRIMITIVE_RESTART:
472    case GL_PRIMITIVE_RESTART_FIXED_INDEX:
473       _mesa_glthread_set_prim_restart(ctx, cap, false);
474       break;
475    case GL_CULL_FACE:
476       ctx->GLThread.CullFace = false;
477       break;
478    case GL_DEPTH_TEST:
479       ctx->GLThread.DepthTest = false;
480       break;
481    }
482 }
483 
/* Client-side glIsEnabled for the caps glthread shadows.  Returns 0 or 1
 * for tracked caps, or -1 when the caller must synchronize and query
 * _mesa_IsEnabled instead. */
static inline int
_mesa_glthread_IsEnabled(struct gl_context *ctx, GLenum cap)
{
   switch (cap) {
   case GL_CULL_FACE:
      return ctx->GLThread.CullFace;
   case GL_DEPTH_TEST:
      return ctx->GLThread.DepthTest;
   case GL_VERTEX_ARRAY:
      return !!(ctx->GLThread.CurrentVAO->UserEnabled & VERT_BIT_POS);
   case GL_NORMAL_ARRAY:
      return !!(ctx->GLThread.CurrentVAO->UserEnabled & VERT_BIT_NORMAL);
   case GL_COLOR_ARRAY:
      return !!(ctx->GLThread.CurrentVAO->UserEnabled & VERT_BIT_COLOR0);
   case GL_TEXTURE_COORD_ARRAY:
      /* Resolved against the tracked client active texture unit. */
      return !!(ctx->GLThread.CurrentVAO->UserEnabled &
                (1 << VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture)));
   default:
      return -1; /* sync and call _mesa_IsEnabled. */
   }
}
505 
506 static inline void
_mesa_glthread_PushAttrib(struct gl_context * ctx,GLbitfield mask)507 _mesa_glthread_PushAttrib(struct gl_context *ctx, GLbitfield mask)
508 {
509    if (ctx->GLThread.ListMode == GL_COMPILE)
510       return;
511 
512    if (ctx->GLThread.AttribStackDepth >= MAX_ATTRIB_STACK_DEPTH)
513       return;
514 
515    struct glthread_attrib_node *attr =
516       &ctx->GLThread.AttribStack[ctx->GLThread.AttribStackDepth++];
517 
518    attr->Mask = mask;
519 
520    if (mask & (GL_POLYGON_BIT | GL_ENABLE_BIT))
521       attr->CullFace = ctx->GLThread.CullFace;
522 
523    if (mask & (GL_DEPTH_BUFFER_BIT | GL_ENABLE_BIT))
524       attr->DepthTest = ctx->GLThread.DepthTest;
525 
526    if (mask & GL_TEXTURE_BIT)
527       attr->ActiveTexture = ctx->GLThread.ActiveTexture;
528 
529    if (mask & GL_TRANSFORM_BIT)
530       attr->MatrixMode = ctx->GLThread.MatrixMode;
531 }
532 
533 static inline void
_mesa_glthread_PopAttrib(struct gl_context * ctx)534 _mesa_glthread_PopAttrib(struct gl_context *ctx)
535 {
536    if (ctx->GLThread.ListMode == GL_COMPILE)
537       return;
538 
539    if (ctx->GLThread.AttribStackDepth == 0)
540       return;
541 
542    struct glthread_attrib_node *attr =
543       &ctx->GLThread.AttribStack[--ctx->GLThread.AttribStackDepth];
544    unsigned mask = attr->Mask;
545 
546    if (mask & (GL_POLYGON_BIT | GL_ENABLE_BIT))
547       ctx->GLThread.CullFace = attr->CullFace;
548 
549    if (mask & (GL_DEPTH_BUFFER_BIT | GL_ENABLE_BIT))
550       ctx->GLThread.DepthTest = attr->DepthTest;
551 
552    if (mask & GL_TEXTURE_BIT)
553       ctx->GLThread.ActiveTexture = attr->ActiveTexture;
554 
555    if (mask & GL_TRANSFORM_BIT) {
556       ctx->GLThread.MatrixMode = attr->MatrixMode;
557       ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, attr->MatrixMode);
558    }
559 }
560 
561 static bool
is_matrix_stack_full(struct gl_context * ctx,gl_matrix_index idx)562 is_matrix_stack_full(struct gl_context *ctx, gl_matrix_index idx)
563 {
564    int max_stack_depth = 0;
565    if (M_MODELVIEW == ctx->GLThread.MatrixIndex) {
566       max_stack_depth = MAX_MODELVIEW_STACK_DEPTH;
567    } else if (M_PROJECTION == ctx->GLThread.MatrixIndex) {
568       max_stack_depth = MAX_PROJECTION_STACK_DEPTH;
569    } else if (M_PROGRAM_LAST >= ctx->GLThread.MatrixIndex) {
570       max_stack_depth = MAX_PROGRAM_MATRIX_STACK_DEPTH;
571    } else if (M_TEXTURE_LAST >= ctx->GLThread.MatrixIndex) {
572       max_stack_depth = MAX_TEXTURE_STACK_DEPTH;
573    }
574    assert(max_stack_depth);
575 
576    if (ctx->GLThread.MatrixStackDepth[idx] + 1 >= max_stack_depth)
577       return true;
578 
579    return false;
580 }
581 
582 static inline void
_mesa_glthread_MatrixPushEXT(struct gl_context * ctx,GLenum matrixMode)583 _mesa_glthread_MatrixPushEXT(struct gl_context *ctx, GLenum matrixMode)
584 {
585    if (ctx->GLThread.ListMode == GL_COMPILE)
586       return;
587 
588    if (is_matrix_stack_full(ctx, _mesa_get_matrix_index(ctx, matrixMode)))
589       return;
590 
591    ctx->GLThread.MatrixStackDepth[_mesa_get_matrix_index(ctx, matrixMode)]++;
592 }
593 
594 static inline void
_mesa_glthread_MatrixPopEXT(struct gl_context * ctx,GLenum matrixMode)595 _mesa_glthread_MatrixPopEXT(struct gl_context *ctx, GLenum matrixMode)
596 {
597    if (ctx->GLThread.ListMode == GL_COMPILE)
598       return;
599 
600    if (ctx->GLThread.MatrixStackDepth[_mesa_get_matrix_index(ctx, matrixMode)] == 0)
601       return;
602 
603    ctx->GLThread.MatrixStackDepth[_mesa_get_matrix_index(ctx, matrixMode)]--;
604 }
605 
606 static inline void
_mesa_glthread_ActiveTexture(struct gl_context * ctx,GLenum texture)607 _mesa_glthread_ActiveTexture(struct gl_context *ctx, GLenum texture)
608 {
609    if (ctx->GLThread.ListMode == GL_COMPILE)
610       return;
611 
612    ctx->GLThread.ActiveTexture = texture - GL_TEXTURE0;
613    if (ctx->GLThread.MatrixMode == GL_TEXTURE)
614       ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, texture);
615 }
616 
617 static inline void
_mesa_glthread_PushMatrix(struct gl_context * ctx)618 _mesa_glthread_PushMatrix(struct gl_context *ctx)
619 {
620    if (ctx->GLThread.ListMode == GL_COMPILE)
621       return;
622 
623    if (is_matrix_stack_full(ctx, ctx->GLThread.MatrixIndex))
624       return;
625 
626    ctx->GLThread.MatrixStackDepth[ctx->GLThread.MatrixIndex]++;
627 }
628 
629 static inline void
_mesa_glthread_PopMatrix(struct gl_context * ctx)630 _mesa_glthread_PopMatrix(struct gl_context *ctx)
631 {
632    if (ctx->GLThread.ListMode == GL_COMPILE)
633       return;
634 
635    if (ctx->GLThread.MatrixStackDepth[ctx->GLThread.MatrixIndex] == 0)
636       return;
637 
638    ctx->GLThread.MatrixStackDepth[ctx->GLThread.MatrixIndex]--;
639 }
640 
641 static inline void
_mesa_glthread_MatrixMode(struct gl_context * ctx,GLenum mode)642 _mesa_glthread_MatrixMode(struct gl_context *ctx, GLenum mode)
643 {
644    if (ctx->GLThread.ListMode == GL_COMPILE)
645       return;
646 
647    ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, mode);
648    ctx->GLThread.MatrixMode = mode;
649 }
650 
651 static inline void
_mesa_glthread_ListBase(struct gl_context * ctx,GLuint base)652 _mesa_glthread_ListBase(struct gl_context *ctx, GLuint base)
653 {
654    if (ctx->GLThread.ListMode == GL_COMPILE)
655       return;
656 
657    ctx->GLThread.ListBase = base;
658 }
659 
/* Client-side handling of glCallList: after syncing with any pending display
 * list modifications, execute the list in the application thread. */
static inline void
_mesa_glthread_CallList(struct gl_context *ctx, GLuint list)
{
   if (ctx->GLThread.ListMode == GL_COMPILE)
      return;

   /* Wait for all glEndList and glDeleteLists calls to finish to ensure that
    * all display lists are up to date and the driver thread is not
    * modifying them. We will be executing them in the application thread.
    */
   int batch = p_atomic_read(&ctx->GLThread.LastDListChangeBatchIndex);
   if (batch != -1) {
      util_queue_fence_wait(&ctx->GLThread.batches[batch].fence);
      p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, -1);
   }

   if (!ctx->Shared->DisplayListsAffectGLThread)
      return;

   /* Clear GL_COMPILE_AND_EXECUTE if needed. We only execute here. */
   unsigned saved_mode = ctx->GLThread.ListMode;
   ctx->GLThread.ListMode = 0;

   _mesa_glthread_execute_list(ctx, list);

   ctx->GLThread.ListMode = saved_mode;
}
687 
/* Client-side handling of glCallLists: decode each list id according to
 * "type" and execute it via _mesa_glthread_CallList.  GL_2/3/4_BYTES pack
 * multi-byte ids high-byte first, per the GL spec for glCallLists. */
static inline void
_mesa_glthread_CallLists(struct gl_context *ctx, GLsizei n, GLenum type,
                         const GLvoid *lists)
{
   if (ctx->GLThread.ListMode == GL_COMPILE)
      return;

   /* n <= 0 also guarantees the unsigned loop counters below are safe. */
   if (n <= 0 || !lists)
      return;

   /* Wait for all glEndList and glDeleteLists calls to finish to ensure that
    * all display lists are up to date and the driver thread is not
    * modifying them. We will be executing them in the application thread.
    */
   int batch = p_atomic_read(&ctx->GLThread.LastDListChangeBatchIndex);
   if (batch != -1) {
      util_queue_fence_wait(&ctx->GLThread.batches[batch].fence);
      p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, -1);
   }

   /* Clear GL_COMPILE_AND_EXECUTE if needed. We only execute here. */
   unsigned saved_mode = ctx->GLThread.ListMode;
   ctx->GLThread.ListMode = 0;

   unsigned base = ctx->GLThread.ListBase;

   GLbyte *bptr;
   GLubyte *ubptr;
   GLshort *sptr;
   GLushort *usptr;
   GLint *iptr;
   GLuint *uiptr;
   GLfloat *fptr;

   switch (type) {
   case GL_BYTE:
      bptr = (GLbyte *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + bptr[i]);
      break;
   case GL_UNSIGNED_BYTE:
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + ubptr[i]);
      break;
   case GL_SHORT:
      sptr = (GLshort *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + sptr[i]);
      break;
   case GL_UNSIGNED_SHORT:
      usptr = (GLushort *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + usptr[i]);
      break;
   case GL_INT:
      iptr = (GLint *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + iptr[i]);
      break;
   case GL_UNSIGNED_INT:
      uiptr = (GLuint *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + uiptr[i]);
      break;
   case GL_FLOAT:
      fptr = (GLfloat *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + fptr[i]);
      break;
   case GL_2_BYTES:
      /* Two bytes per id, big-endian. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[2 * i] * 256 +
                                 (GLint)ubptr[2 * i + 1]);
      }
      break;
   case GL_3_BYTES:
      /* Three bytes per id, big-endian. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[3 * i] * 65536 +
                                 (GLint)ubptr[3 * i + 1] * 256 +
                                 (GLint)ubptr[3 * i + 2]);
      }
      break;
   case GL_4_BYTES:
      /* Four bytes per id, big-endian. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[4 * i] * 16777216 +
                                 (GLint)ubptr[4 * i + 1] * 65536 +
                                 (GLint)ubptr[4 * i + 2] * 256 +
                                 (GLint)ubptr[4 * i + 3]);
      }
      break;
   }
   /* Invalid "type" falls through: nothing is executed. */

   ctx->GLThread.ListMode = saved_mode;
}
789 
790 static inline void
_mesa_glthread_NewList(struct gl_context * ctx,GLuint list,GLuint mode)791 _mesa_glthread_NewList(struct gl_context *ctx, GLuint list, GLuint mode)
792 {
793    if (!ctx->GLThread.ListMode)
794       ctx->GLThread.ListMode = mode;
795 }
796 
797 static inline void
_mesa_glthread_EndList(struct gl_context * ctx)798 _mesa_glthread_EndList(struct gl_context *ctx)
799 {
800    if (!ctx->GLThread.ListMode)
801       return;
802 
803    ctx->GLThread.ListMode = 0;
804 
805    /* Track the last display list change. */
806    p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, ctx->GLThread.next);
807    _mesa_glthread_flush_batch(ctx);
808 }
809 
810 static inline void
_mesa_glthread_DeleteLists(struct gl_context * ctx,GLsizei range)811 _mesa_glthread_DeleteLists(struct gl_context *ctx, GLsizei range)
812 {
813    if (range < 0)
814       return;
815 
816    /* Track the last display list change. */
817    p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, ctx->GLThread.next);
818    _mesa_glthread_flush_batch(ctx);
819 }
820 
821 static inline void
_mesa_glthread_DeleteFramebuffers(struct gl_context * ctx,GLsizei n,const GLuint * ids)822 _mesa_glthread_DeleteFramebuffers(struct gl_context *ctx, GLsizei n,
823                                   const GLuint *ids)
824 {
825    if (ctx->GLThread.CurrentDrawFramebuffer) {
826       for (int i = 0; i < n; i++) {
827          if (ctx->GLThread.CurrentDrawFramebuffer == ids[i]) {
828             ctx->GLThread.CurrentDrawFramebuffer = 0;
829             break;
830          }
831       }
832    }
833 }
834 
/** Marshalled payload for glCallList. */
struct marshal_cmd_CallList
{
   struct marshal_cmd_base cmd_base;
   /** Display list name passed to glCallList. */
   GLuint list;
};
840 
841 #endif /* MARSHAL_H */
842