1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** \file glthread_marshal.h
25 *
26 * Declarations of functions related to marshalling GL calls from a client
27 * thread to a server thread.
28 */
29
30 #ifndef MARSHAL_H
31 #define MARSHAL_H
32
33 #include "main/glthread.h"
34 #include "main/context.h"
35 #include "main/macros.h"
36 #include "marshal_generated.h"
37
/**
 * Header prepended to every marshalled command in a glthread batch buffer.
 * The unmarshal dispatch uses cmd_id to find the handler and cmd_size to
 * advance to the next command.
 */
struct marshal_cmd_base
{
   /**
    * Type of command. See enum marshal_dispatch_cmd_id.
    */
   uint16_t cmd_id;

   /**
    * Number of uint64_t elements used by the command.
    */
   uint16_t cmd_size;
};
50
51 typedef uint32_t (*_mesa_unmarshal_func)(struct gl_context *ctx, const void *cmd, const uint64_t *last);
52 extern const _mesa_unmarshal_func _mesa_unmarshal_dispatch[NUM_DISPATCH_CMD];
53
/**
 * Reserve space for a new command in the current glthread batch.
 *
 * \param ctx     the GL context
 * \param cmd_id  command identifier (see enum marshal_dispatch_cmd_id)
 * \param size    command size in bytes, including struct marshal_cmd_base
 * \return pointer to the reserved space; the caller fills in the payload
 *         after the embedded marshal_cmd_base header
 */
static inline void *
_mesa_glthread_allocate_command(struct gl_context *ctx,
                                uint16_t cmd_id,
                                unsigned size)
{
   struct glthread_state *glthread = &ctx->GLThread;
   /* Batch buffers are uint64_t arrays; round the byte size up to whole
    * 8-byte elements.
    */
   const unsigned num_elements = align(size, 8) / 8;

   /* Flush the batch if the command wouldn't fit in the remaining space.
    * NOTE(review): this assumes size <= MARSHAL_MAX_CMD_SIZE, so the
    * command always fits in a freshly flushed batch -- confirm with callers.
    */
   if (unlikely(glthread->used + num_elements > MARSHAL_MAX_CMD_SIZE / 8))
      _mesa_glthread_flush_batch(ctx);

   struct glthread_batch *next = glthread->next_batch;
   struct marshal_cmd_base *cmd_base =
      (struct marshal_cmd_base *)&next->buffer[glthread->used];
   glthread->used += num_elements;
   cmd_base->cmd_id = cmd_id;
   cmd_base->cmd_size = num_elements;
   return cmd_base;
}
73
74 static inline bool
_mesa_glthread_has_no_pack_buffer(const struct gl_context * ctx)75 _mesa_glthread_has_no_pack_buffer(const struct gl_context *ctx)
76 {
77 return ctx->GLThread.CurrentPixelPackBufferName == 0;
78 }
79
80 static inline bool
_mesa_glthread_has_no_unpack_buffer(const struct gl_context * ctx)81 _mesa_glthread_has_no_unpack_buffer(const struct gl_context *ctx)
82 {
83 return ctx->GLThread.CurrentPixelUnpackBufferName == 0;
84 }
85
86 /**
87 * Instead of conditionally handling marshaling immediate index data in draw
88 * calls (deprecated and removed in GL core), we just disable threading.
89 */
90 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indices(const struct gl_context * ctx)91 _mesa_glthread_has_non_vbo_vertices_or_indices(const struct gl_context *ctx)
92 {
93 const struct glthread_state *glthread = &ctx->GLThread;
94 struct glthread_vao *vao = glthread->CurrentVAO;
95
96 return ctx->API != API_OPENGL_CORE &&
97 (vao->CurrentElementBufferName == 0 ||
98 (vao->UserPointerMask & vao->BufferEnabled));
99 }
100
101 static inline bool
_mesa_glthread_has_non_vbo_vertices(const struct gl_context * ctx)102 _mesa_glthread_has_non_vbo_vertices(const struct gl_context *ctx)
103 {
104 const struct glthread_state *glthread = &ctx->GLThread;
105 const struct glthread_vao *vao = glthread->CurrentVAO;
106
107 return ctx->API != API_OPENGL_CORE &&
108 (vao->UserPointerMask & vao->BufferEnabled);
109 }
110
111 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indirect(const struct gl_context * ctx)112 _mesa_glthread_has_non_vbo_vertices_or_indirect(const struct gl_context *ctx)
113 {
114 const struct glthread_state *glthread = &ctx->GLThread;
115 const struct glthread_vao *vao = glthread->CurrentVAO;
116
117 return ctx->API != API_OPENGL_CORE &&
118 (glthread->CurrentDrawIndirectBufferName == 0 ||
119 (vao->UserPointerMask & vao->BufferEnabled));
120 }
121
122 static inline bool
_mesa_glthread_has_non_vbo_vertices_or_indices_or_indirect(const struct gl_context * ctx)123 _mesa_glthread_has_non_vbo_vertices_or_indices_or_indirect(const struct gl_context *ctx)
124 {
125 const struct glthread_state *glthread = &ctx->GLThread;
126 struct glthread_vao *vao = glthread->CurrentVAO;
127
128 return ctx->API != API_OPENGL_CORE &&
129 (glthread->CurrentDrawIndirectBufferName == 0 ||
130 vao->CurrentElementBufferName == 0 ||
131 (vao->UserPointerMask & vao->BufferEnabled));
132 }
133
134
135 struct _glapi_table *
136 _mesa_create_marshal_table(const struct gl_context *ctx);
137
138 static inline unsigned
_mesa_buffer_enum_to_count(GLenum buffer)139 _mesa_buffer_enum_to_count(GLenum buffer)
140 {
141 switch (buffer) {
142 case GL_COLOR:
143 return 4;
144 case GL_DEPTH_STENCIL:
145 return 2;
146 case GL_STENCIL:
147 case GL_DEPTH:
148 return 1;
149 default:
150 return 0;
151 }
152 }
153
154 static inline unsigned
_mesa_tex_param_enum_to_count(GLenum pname)155 _mesa_tex_param_enum_to_count(GLenum pname)
156 {
157 switch (pname) {
158 case GL_TEXTURE_MIN_FILTER:
159 case GL_TEXTURE_MAG_FILTER:
160 case GL_TEXTURE_WRAP_S:
161 case GL_TEXTURE_WRAP_T:
162 case GL_TEXTURE_WRAP_R:
163 case GL_TEXTURE_BASE_LEVEL:
164 case GL_TEXTURE_MAX_LEVEL:
165 case GL_GENERATE_MIPMAP_SGIS:
166 case GL_TEXTURE_COMPARE_MODE_ARB:
167 case GL_TEXTURE_COMPARE_FUNC_ARB:
168 case GL_DEPTH_TEXTURE_MODE_ARB:
169 case GL_DEPTH_STENCIL_TEXTURE_MODE:
170 case GL_TEXTURE_SRGB_DECODE_EXT:
171 case GL_TEXTURE_REDUCTION_MODE_EXT:
172 case GL_TEXTURE_CUBE_MAP_SEAMLESS:
173 case GL_TEXTURE_SWIZZLE_R:
174 case GL_TEXTURE_SWIZZLE_G:
175 case GL_TEXTURE_SWIZZLE_B:
176 case GL_TEXTURE_SWIZZLE_A:
177 case GL_TEXTURE_MIN_LOD:
178 case GL_TEXTURE_MAX_LOD:
179 case GL_TEXTURE_PRIORITY:
180 case GL_TEXTURE_MAX_ANISOTROPY_EXT:
181 case GL_TEXTURE_LOD_BIAS:
182 case GL_TEXTURE_TILING_EXT:
183 return 1;
184 case GL_TEXTURE_CROP_RECT_OES:
185 case GL_TEXTURE_SWIZZLE_RGBA:
186 case GL_TEXTURE_BORDER_COLOR:
187 return 4;
188 default:
189 return 0;
190 }
191 }
192
193 static inline unsigned
_mesa_fog_enum_to_count(GLenum pname)194 _mesa_fog_enum_to_count(GLenum pname)
195 {
196 switch (pname) {
197 case GL_FOG_MODE:
198 case GL_FOG_DENSITY:
199 case GL_FOG_START:
200 case GL_FOG_END:
201 case GL_FOG_INDEX:
202 case GL_FOG_COORDINATE_SOURCE_EXT:
203 case GL_FOG_DISTANCE_MODE_NV:
204 return 1;
205 case GL_FOG_COLOR:
206 return 4;
207 default:
208 return 0;
209 }
210 }
211
212 static inline unsigned
_mesa_light_enum_to_count(GLenum pname)213 _mesa_light_enum_to_count(GLenum pname)
214 {
215 switch (pname) {
216 case GL_AMBIENT:
217 case GL_DIFFUSE:
218 case GL_SPECULAR:
219 case GL_POSITION:
220 return 4;
221 case GL_SPOT_DIRECTION:
222 return 3;
223 case GL_SPOT_EXPONENT:
224 case GL_SPOT_CUTOFF:
225 case GL_CONSTANT_ATTENUATION:
226 case GL_LINEAR_ATTENUATION:
227 case GL_QUADRATIC_ATTENUATION:
228 return 1;
229 default:
230 return 0;
231 }
232 }
233
234 static inline unsigned
_mesa_light_model_enum_to_count(GLenum pname)235 _mesa_light_model_enum_to_count(GLenum pname)
236 {
237 switch (pname) {
238 case GL_LIGHT_MODEL_AMBIENT:
239 return 4;
240 case GL_LIGHT_MODEL_LOCAL_VIEWER:
241 case GL_LIGHT_MODEL_TWO_SIDE:
242 case GL_LIGHT_MODEL_COLOR_CONTROL:
243 return 1;
244 default:
245 return 0;
246 }
247 }
248
249 static inline unsigned
_mesa_texenv_enum_to_count(GLenum pname)250 _mesa_texenv_enum_to_count(GLenum pname)
251 {
252 switch (pname) {
253 case GL_TEXTURE_ENV_MODE:
254 case GL_COMBINE_RGB:
255 case GL_COMBINE_ALPHA:
256 case GL_SOURCE0_RGB:
257 case GL_SOURCE1_RGB:
258 case GL_SOURCE2_RGB:
259 case GL_SOURCE3_RGB_NV:
260 case GL_SOURCE0_ALPHA:
261 case GL_SOURCE1_ALPHA:
262 case GL_SOURCE2_ALPHA:
263 case GL_SOURCE3_ALPHA_NV:
264 case GL_OPERAND0_RGB:
265 case GL_OPERAND1_RGB:
266 case GL_OPERAND2_RGB:
267 case GL_OPERAND3_RGB_NV:
268 case GL_OPERAND0_ALPHA:
269 case GL_OPERAND1_ALPHA:
270 case GL_OPERAND2_ALPHA:
271 case GL_OPERAND3_ALPHA_NV:
272 case GL_RGB_SCALE:
273 case GL_ALPHA_SCALE:
274 case GL_TEXTURE_LOD_BIAS_EXT:
275 case GL_COORD_REPLACE:
276 return 1;
277 case GL_TEXTURE_ENV_COLOR:
278 return 4;
279 default:
280 return 0;
281 }
282 }
283
284 static inline unsigned
_mesa_texgen_enum_to_count(GLenum pname)285 _mesa_texgen_enum_to_count(GLenum pname)
286 {
287 switch (pname) {
288 case GL_TEXTURE_GEN_MODE:
289 return 1;
290 case GL_OBJECT_PLANE:
291 case GL_EYE_PLANE:
292 return 4;
293 default:
294 return 0;
295 }
296 }
297
298 static inline unsigned
_mesa_material_enum_to_count(GLenum pname)299 _mesa_material_enum_to_count(GLenum pname)
300 {
301 switch (pname) {
302 case GL_EMISSION:
303 case GL_AMBIENT:
304 case GL_DIFFUSE:
305 case GL_SPECULAR:
306 case GL_AMBIENT_AND_DIFFUSE:
307 return 4;
308 case GL_COLOR_INDEXES:
309 return 3;
310 case GL_SHININESS:
311 return 1;
312 default:
313 return 0;
314 }
315 }
316
317 static inline unsigned
_mesa_point_param_enum_to_count(GLenum pname)318 _mesa_point_param_enum_to_count(GLenum pname)
319 {
320 switch (pname) {
321 case GL_DISTANCE_ATTENUATION_EXT:
322 return 3;
323 case GL_POINT_SIZE_MIN_EXT:
324 case GL_POINT_SIZE_MAX_EXT:
325 case GL_POINT_FADE_THRESHOLD_SIZE_EXT:
326 case GL_POINT_SPRITE_COORD_ORIGIN:
327 return 1;
328 default:
329 return 0;
330 }
331 }
332
333 static inline unsigned
_mesa_calllists_enum_to_count(GLenum type)334 _mesa_calllists_enum_to_count(GLenum type)
335 {
336 switch (type) {
337 case GL_BYTE:
338 case GL_UNSIGNED_BYTE:
339 return 1;
340 case GL_SHORT:
341 case GL_UNSIGNED_SHORT:
342 case GL_2_BYTES:
343 return 2;
344 case GL_3_BYTES:
345 return 3;
346 case GL_INT:
347 case GL_UNSIGNED_INT:
348 case GL_FLOAT:
349 case GL_4_BYTES:
350 return 4;
351 default:
352 return 0;
353 }
354 }
355
356 static inline unsigned
_mesa_patch_param_enum_to_count(GLenum pname)357 _mesa_patch_param_enum_to_count(GLenum pname)
358 {
359 switch (pname) {
360 case GL_PATCH_DEFAULT_OUTER_LEVEL:
361 return 4;
362 case GL_PATCH_DEFAULT_INNER_LEVEL:
363 return 2;
364 default:
365 return 0;
366 }
367 }
368
369 static inline unsigned
_mesa_memobj_enum_to_count(GLenum pname)370 _mesa_memobj_enum_to_count(GLenum pname)
371 {
372 switch (pname) {
373 case GL_DEDICATED_MEMORY_OBJECT_EXT:
374 return 1;
375 default:
376 return 0;
377 }
378 }
379
380 static inline unsigned
_mesa_semaphore_enum_to_count(GLenum pname)381 _mesa_semaphore_enum_to_count(GLenum pname)
382 {
383 switch (pname) {
384 /* EXT_semaphore and EXT_semaphore_fd define no parameters */
385 default:
386 return 0;
387 }
388 }
389
390 static inline gl_vert_attrib
_mesa_array_to_attrib(struct gl_context * ctx,GLenum array)391 _mesa_array_to_attrib(struct gl_context *ctx, GLenum array)
392 {
393 switch (array) {
394 case GL_VERTEX_ARRAY:
395 return VERT_ATTRIB_POS;
396 case GL_NORMAL_ARRAY:
397 return VERT_ATTRIB_NORMAL;
398 case GL_COLOR_ARRAY:
399 return VERT_ATTRIB_COLOR0;
400 case GL_INDEX_ARRAY:
401 return VERT_ATTRIB_COLOR_INDEX;
402 case GL_TEXTURE_COORD_ARRAY:
403 return VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture);
404 case GL_EDGE_FLAG_ARRAY:
405 return VERT_ATTRIB_EDGEFLAG;
406 case GL_FOG_COORDINATE_ARRAY:
407 return VERT_ATTRIB_FOG;
408 case GL_SECONDARY_COLOR_ARRAY:
409 return VERT_ATTRIB_COLOR1;
410 case GL_POINT_SIZE_ARRAY_OES:
411 return VERT_ATTRIB_POINT_SIZE;
412 case GL_PRIMITIVE_RESTART_NV:
413 return VERT_ATTRIB_PRIMITIVE_RESTART_NV;
414 default:
415 if (array >= GL_TEXTURE0 && array <= GL_TEXTURE7)
416 return VERT_ATTRIB_TEX(array - GL_TEXTURE0);
417 return VERT_ATTRIB_MAX;
418 }
419 }
420
421 static inline gl_matrix_index
_mesa_get_matrix_index(struct gl_context * ctx,GLenum mode)422 _mesa_get_matrix_index(struct gl_context *ctx, GLenum mode)
423 {
424 if (mode == GL_MODELVIEW || mode == GL_PROJECTION)
425 return M_MODELVIEW + (mode - GL_MODELVIEW);
426
427 if (mode == GL_TEXTURE)
428 return M_TEXTURE0 + ctx->GLThread.ActiveTexture;
429
430 if (mode >= GL_TEXTURE0 && mode <= GL_TEXTURE0 + MAX_TEXTURE_UNITS - 1)
431 return M_TEXTURE0 + (mode - GL_TEXTURE0);
432
433 if (mode >= GL_MATRIX0_ARB && mode <= GL_MATRIX0_ARB + MAX_PROGRAM_MATRICES - 1)
434 return M_PROGRAM0 + (mode - GL_MATRIX0_ARB);
435
436 return M_DUMMY;
437 }
438
439 static inline void
_mesa_glthread_Enable(struct gl_context * ctx,GLenum cap)440 _mesa_glthread_Enable(struct gl_context *ctx, GLenum cap)
441 {
442 if (ctx->GLThread.ListMode == GL_COMPILE)
443 return;
444
445 if (cap == GL_PRIMITIVE_RESTART ||
446 cap == GL_PRIMITIVE_RESTART_FIXED_INDEX)
447 _mesa_glthread_set_prim_restart(ctx, cap, true);
448 else if (cap == GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB)
449 _mesa_glthread_disable(ctx, "Enable(DEBUG_OUTPUT_SYNCHRONOUS)");
450 }
451
452 static inline void
_mesa_glthread_Disable(struct gl_context * ctx,GLenum cap)453 _mesa_glthread_Disable(struct gl_context *ctx, GLenum cap)
454 {
455 if (ctx->GLThread.ListMode == GL_COMPILE)
456 return;
457
458 if (cap == GL_PRIMITIVE_RESTART ||
459 cap == GL_PRIMITIVE_RESTART_FIXED_INDEX)
460 _mesa_glthread_set_prim_restart(ctx, cap, false);
461 }
462
463 static inline void
_mesa_glthread_PushAttrib(struct gl_context * ctx,GLbitfield mask)464 _mesa_glthread_PushAttrib(struct gl_context *ctx, GLbitfield mask)
465 {
466 if (ctx->GLThread.ListMode == GL_COMPILE)
467 return;
468
469 struct glthread_attrib_node *attr =
470 &ctx->GLThread.AttribStack[ctx->GLThread.AttribStackDepth++];
471
472 attr->Mask = mask;
473
474 if (mask & GL_TEXTURE_BIT)
475 attr->ActiveTexture = ctx->GLThread.ActiveTexture;
476
477 if (mask & GL_TRANSFORM_BIT)
478 attr->MatrixMode = ctx->GLThread.MatrixMode;
479 }
480
481 static inline void
_mesa_glthread_PopAttrib(struct gl_context * ctx)482 _mesa_glthread_PopAttrib(struct gl_context *ctx)
483 {
484 if (ctx->GLThread.ListMode == GL_COMPILE)
485 return;
486
487 struct glthread_attrib_node *attr =
488 &ctx->GLThread.AttribStack[--ctx->GLThread.AttribStackDepth];
489 unsigned mask = attr->Mask;
490
491 if (mask & GL_TEXTURE_BIT)
492 ctx->GLThread.ActiveTexture = attr->ActiveTexture;
493
494 if (mask & GL_TRANSFORM_BIT) {
495 ctx->GLThread.MatrixMode = attr->MatrixMode;
496 ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, attr->MatrixMode);
497 }
498 }
499
500 static inline void
_mesa_glthread_MatrixPushEXT(struct gl_context * ctx,GLenum matrixMode)501 _mesa_glthread_MatrixPushEXT(struct gl_context *ctx, GLenum matrixMode)
502 {
503 if (ctx->GLThread.ListMode == GL_COMPILE)
504 return;
505
506 ctx->GLThread.MatrixStackDepth[_mesa_get_matrix_index(ctx, matrixMode)]++;
507 }
508
509 static inline void
_mesa_glthread_MatrixPopEXT(struct gl_context * ctx,GLenum matrixMode)510 _mesa_glthread_MatrixPopEXT(struct gl_context *ctx, GLenum matrixMode)
511 {
512 if (ctx->GLThread.ListMode == GL_COMPILE)
513 return;
514
515 ctx->GLThread.MatrixStackDepth[_mesa_get_matrix_index(ctx, matrixMode)]--;
516 }
517
518 static inline void
_mesa_glthread_ActiveTexture(struct gl_context * ctx,GLenum texture)519 _mesa_glthread_ActiveTexture(struct gl_context *ctx, GLenum texture)
520 {
521 if (ctx->GLThread.ListMode == GL_COMPILE)
522 return;
523
524 ctx->GLThread.ActiveTexture = texture - GL_TEXTURE0;
525 if (ctx->GLThread.MatrixMode == GL_TEXTURE)
526 ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, texture);
527 }
528
529 static inline void
_mesa_glthread_PushMatrix(struct gl_context * ctx)530 _mesa_glthread_PushMatrix(struct gl_context *ctx)
531 {
532 if (ctx->GLThread.ListMode == GL_COMPILE)
533 return;
534
535 ctx->GLThread.MatrixStackDepth[ctx->GLThread.MatrixIndex]++;
536 }
537
538 static inline void
_mesa_glthread_PopMatrix(struct gl_context * ctx)539 _mesa_glthread_PopMatrix(struct gl_context *ctx)
540 {
541 if (ctx->GLThread.ListMode == GL_COMPILE)
542 return;
543
544 ctx->GLThread.MatrixStackDepth[ctx->GLThread.MatrixIndex]--;
545 }
546
547 static inline void
_mesa_glthread_MatrixMode(struct gl_context * ctx,GLenum mode)548 _mesa_glthread_MatrixMode(struct gl_context *ctx, GLenum mode)
549 {
550 if (ctx->GLThread.ListMode == GL_COMPILE)
551 return;
552
553 ctx->GLThread.MatrixIndex = _mesa_get_matrix_index(ctx, mode);
554 ctx->GLThread.MatrixMode = mode;
555 }
556
557 static inline void
_mesa_glthread_ListBase(struct gl_context * ctx,GLuint base)558 _mesa_glthread_ListBase(struct gl_context *ctx, GLuint base)
559 {
560 if (ctx->GLThread.ListMode == GL_COMPILE)
561 return;
562
563 ctx->GLThread.ListBase = base;
564 }
565
/**
 * Execute a display list in the application thread.
 *
 * Skipped while compiling a list (GL_COMPILE); under GL_COMPILE_AND_EXECUTE
 * the mode is temporarily cleared so execution happens here exactly once.
 */
static inline void
_mesa_glthread_CallList(struct gl_context *ctx, GLuint list)
{
   if (ctx->GLThread.ListMode == GL_COMPILE)
      return;

   /* Wait for all glEndList and glDeleteLists calls to finish to ensure that
    * all display lists are up to date and the driver thread is not
    * modifiying them. We will be executing them in the application thread.
    */
   int batch = p_atomic_read(&ctx->GLThread.LastDListChangeBatchIndex);
   if (batch != -1) {
      util_queue_fence_wait(&ctx->GLThread.batches[batch].fence);
      /* Reset so subsequent calls skip the fence wait until the next
       * list change.
       */
      p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, -1);
   }

   /* Clear GL_COMPILE_AND_EXECUTE if needed. We only execute here. */
   unsigned saved_mode = ctx->GLThread.ListMode;
   ctx->GLThread.ListMode = 0;

   _mesa_glthread_execute_list(ctx, list);

   ctx->GLThread.ListMode = saved_mode;
}
590
/**
 * Execute n display lists in the application thread (glCallLists).
 *
 * Each entry in \p lists is decoded according to \p type, offset by the
 * tracked glListBase, and executed via _mesa_glthread_CallList.  Unknown
 * types fall through the switch and are silently ignored.
 */
static inline void
_mesa_glthread_CallLists(struct gl_context *ctx, GLsizei n, GLenum type,
                         const GLvoid *lists)
{
   if (ctx->GLThread.ListMode == GL_COMPILE)
      return;

   /* Guarantees n > 0 below, so the signed/unsigned loop comparisons
    * (unsigned i < n) are safe.
    */
   if (n <= 0 || !lists)
      return;

   /* Wait for all glEndList and glDeleteLists calls to finish to ensure that
    * all display lists are up to date and the driver thread is not
    * modifiying them. We will be executing them in the application thread.
    */
   int batch = p_atomic_read(&ctx->GLThread.LastDListChangeBatchIndex);
   if (batch != -1) {
      util_queue_fence_wait(&ctx->GLThread.batches[batch].fence);
      p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, -1);
   }

   /* Clear GL_COMPILE_AND_EXECUTE if needed. We only execute here. */
   unsigned saved_mode = ctx->GLThread.ListMode;
   ctx->GLThread.ListMode = 0;

   unsigned base = ctx->GLThread.ListBase;

   /* One typed pointer per supported list-name encoding. */
   GLbyte *bptr;
   GLubyte *ubptr;
   GLshort *sptr;
   GLushort *usptr;
   GLint *iptr;
   GLuint *uiptr;
   GLfloat *fptr;

   switch (type) {
   case GL_BYTE:
      bptr = (GLbyte *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + bptr[i]);
      break;
   case GL_UNSIGNED_BYTE:
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + ubptr[i]);
      break;
   case GL_SHORT:
      sptr = (GLshort *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + sptr[i]);
      break;
   case GL_UNSIGNED_SHORT:
      usptr = (GLushort *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + usptr[i]);
      break;
   case GL_INT:
      iptr = (GLint *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + iptr[i]);
      break;
   case GL_UNSIGNED_INT:
      uiptr = (GLuint *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + uiptr[i]);
      break;
   case GL_FLOAT:
      fptr = (GLfloat *) lists;
      for (unsigned i = 0; i < n; i++)
         _mesa_glthread_CallList(ctx, base + fptr[i]);
      break;
   case GL_2_BYTES:
      /* Big-endian byte pairs: name = b0*256 + b1. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[2 * i] * 256 +
                                 (GLint)ubptr[2 * i + 1]);
      }
      break;
   case GL_3_BYTES:
      /* Big-endian byte triples: name = b0*65536 + b1*256 + b2. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[3 * i] * 65536 +
                                 (GLint)ubptr[3 * i + 1] * 256 +
                                 (GLint)ubptr[3 * i + 2]);
      }
      break;
   case GL_4_BYTES:
      /* Big-endian byte quads: name = b0*2^24 + b1*2^16 + b2*2^8 + b3. */
      ubptr = (GLubyte *) lists;
      for (unsigned i = 0; i < n; i++) {
         _mesa_glthread_CallList(ctx, base +
                                 (GLint)ubptr[4 * i] * 16777216 +
                                 (GLint)ubptr[4 * i + 1] * 65536 +
                                 (GLint)ubptr[4 * i + 2] * 256 +
                                 (GLint)ubptr[4 * i + 3]);
      }
      break;
   }

   ctx->GLThread.ListMode = saved_mode;
}
692
693 static inline void
_mesa_glthread_NewList(struct gl_context * ctx,GLuint list,GLuint mode)694 _mesa_glthread_NewList(struct gl_context *ctx, GLuint list, GLuint mode)
695 {
696 if (!ctx->GLThread.ListMode)
697 ctx->GLThread.ListMode = mode;
698 }
699
/**
 * Shadow glEndList: leave list-compilation mode and record that display
 * lists changed in the batch being built, so the next glCallList(s) waits
 * for it before executing lists in the application thread.
 */
static inline void
_mesa_glthread_EndList(struct gl_context *ctx)
{
   if (!ctx->GLThread.ListMode)
      return;

   ctx->GLThread.ListMode = 0;

   /* Track the last display list change. */
   p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, ctx->GLThread.next);
   _mesa_glthread_flush_batch(ctx);
}
712
713 static inline void
_mesa_glthread_DeleteLists(struct gl_context * ctx,GLsizei range)714 _mesa_glthread_DeleteLists(struct gl_context *ctx, GLsizei range)
715 {
716 if (range < 0)
717 return;
718
719 /* Track the last display list change. */
720 p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, ctx->GLThread.next);
721 _mesa_glthread_flush_batch(ctx);
722 }
723
/** Marshalled payload for glCallList: header plus the list name. */
struct marshal_cmd_CallList
{
   struct marshal_cmd_base cmd_base;
   /* Display list name to execute. */
   GLuint list;
};
729
730 #endif /* MARSHAL_H */
731