/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* This implements vertex array state tracking for glthread. It's separate
 * from the rest of Mesa. Only the minimum functionality needed to serve
 * glthread is implemented here.
 */

#include "main/glthread.h"
#include "main/glformats.h"
#include "main/mtypes.h"
#include "main/hash.h"
#include "main/dispatch.h"
#include "main/varray.h"

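/* Reset a glthread VAO to its default state. Attributes default to a 16-byte
 * element size unless they have an entry in default_elem_size below.
 */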
void
_mesa_glthread_reset_vao(struct glthread_vao *vao)
{
   static unsigned default_elem_size[VERT_ATTRIB_MAX] = {
      [VERT_ATTRIB_NORMAL] = 12,
      [VERT_ATTRIB_COLOR1] = 12,
      [VERT_ATTRIB_FOG] = 4,
      [VERT_ATTRIB_COLOR_INDEX] = 4,
      [VERT_ATTRIB_EDGEFLAG] = 1,
      [VERT_ATTRIB_POINT_SIZE] = 4,
   };

   vao->CurrentElementBufferName = 0;
   vao->UserEnabled = 0;
   vao->Enabled = 0;
   vao->BufferEnabled = 0;
   vao->UserPointerMask = 0;
   vao->NonZeroDivisorMask = 0;

   for (unsigned i = 0; i < ARRAY_SIZE(vao->Attrib); i++) {
      unsigned elem_size = default_elem_size[i];
      if (!elem_size)
         elem_size = 16;

      vao->Attrib[i].ElementSize = elem_size;
      vao->Attrib[i].RelativeOffset = 0;
      vao->Attrib[i].BufferIndex = i;
      vao->Attrib[i].Stride = elem_size;
      vao->Attrib[i].Divisor = 0;
      vao->Attrib[i].EnabledAttribCount = 0;
      vao->Attrib[i].Pointer = NULL;
   }
}

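/* Look up a glthread VAO by name. The most recently looked-up VAO is cached
 * in LastLookedUpVAO to avoid hash table lookups for repeated accesses to
 * the same object.
 */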
static struct glthread_vao *
lookup_vao(struct gl_context *ctx, GLuint id)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   assert(id != 0);

   if (glthread->LastLookedUpVAO &&
       glthread->LastLookedUpVAO->Name == id) {
      vao = glthread->LastLookedUpVAO;
   } else {
      vao = _mesa_HashLookupLocked(glthread->VAOs, id);
      if (!vao)
         return NULL;

      glthread->LastLookedUpVAO = vao;
   }

   return vao;
}

void
_mesa_glthread_BindVertexArray(struct gl_context *ctx, GLuint id)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (id == 0) {
      glthread->CurrentVAO = &glthread->DefaultVAO;
   } else {
      struct glthread_vao *vao = lookup_vao(ctx, id);

      if (vao)
         glthread->CurrentVAO = vao;
   }
}

void
_mesa_glthread_DeleteVertexArrays(struct gl_context *ctx,
                                  GLsizei n, const GLuint *ids)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!ids)
      return;

   for (int i = 0; i < n; i++) {
      /* IDs equal to 0 should be silently ignored. */
      if (!ids[i])
         continue;

      struct glthread_vao *vao = lookup_vao(ctx, ids[i]);
      if (!vao)
         continue;

      /* If the array object is currently bound, the spec says "the binding
       * for that object reverts to zero and the default vertex array
       * becomes current."
       */
      if (glthread->CurrentVAO == vao)
         glthread->CurrentVAO = &glthread->DefaultVAO;

      if (glthread->LastLookedUpVAO == vao)
         glthread->LastLookedUpVAO = NULL;

      /* The ID is immediately freed for re-use */
      _mesa_HashRemoveLocked(glthread->VAOs, vao->Name);
      free(vao);
   }
}

void
_mesa_glthread_GenVertexArrays(struct gl_context *ctx,
                               GLsizei n, GLuint *arrays)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!arrays)
      return;

   /* The IDs have been generated at this point. Create VAOs for glthread. */
   for (int i = 0; i < n; i++) {
      GLuint id = arrays[i];
      struct glthread_vao *vao;

      vao = calloc(1, sizeof(*vao));
      if (!vao)
         continue; /* Is that all we can do? */

      vao->Name = id;
      _mesa_glthread_reset_vao(vao);
      _mesa_HashInsertLocked(glthread->VAOs, id, vao, true);
   }
}

/* If vaobj is NULL, use the currently-bound VAO. */
static inline struct glthread_vao *
get_vao(struct gl_context *ctx, const GLuint *vaobj)
{
   if (vaobj)
      return lookup_vao(ctx, *vaobj);

   return ctx->GLThread.CurrentVAO;
}

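/* Recompute the derived primitive restart state: whether restart is active
 * at all, and the effective restart index for 1-, 2-, and 4-byte index types.
 */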
static void
update_primitive_restart(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   glthread->_PrimitiveRestart = glthread->PrimitiveRestart ||
                                 glthread->PrimitiveRestartFixedIndex;
   glthread->_RestartIndex[0] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 1);
   glthread->_RestartIndex[1] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 2);
   glthread->_RestartIndex[3] =
      _mesa_get_prim_restart_index(glthread->PrimitiveRestartFixedIndex,
                                   glthread->RestartIndex, 4);
}

void
_mesa_glthread_set_prim_restart(struct gl_context *ctx, GLenum cap, bool value)
{
   switch (cap) {
   case GL_PRIMITIVE_RESTART:
      ctx->GLThread.PrimitiveRestart = value;
      break;
   case GL_PRIMITIVE_RESTART_FIXED_INDEX:
      ctx->GLThread.PrimitiveRestartFixedIndex = value;
      break;
   }

   update_primitive_restart(ctx);
}

void
_mesa_glthread_PrimitiveRestartIndex(struct gl_context *ctx, GLuint index)
{
   ctx->GLThread.RestartIndex = index;
   update_primitive_restart(ctx);
}

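/* Count how many enabled attributes source from each buffer binding. The
 * first enabled attribute sets the binding's bit in BufferEnabled; a second
 * one marks the binding as interleaved in BufferInterleaved.
 */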
static inline void
enable_buffer(struct glthread_vao *vao, unsigned binding_index)
{
   int attrib_count = ++vao->Attrib[binding_index].EnabledAttribCount;

   if (attrib_count == 1)
      vao->BufferEnabled |= 1 << binding_index;
   else if (attrib_count == 2)
      vao->BufferInterleaved |= 1 << binding_index;
}

static inline void
disable_buffer(struct glthread_vao *vao, unsigned binding_index)
{
   int attrib_count = --vao->Attrib[binding_index].EnabledAttribCount;

   if (attrib_count == 0)
      vao->BufferEnabled &= ~(1 << binding_index);
   else if (attrib_count == 1)
      vao->BufferInterleaved &= ~(1 << binding_index);
   else
      assert(attrib_count >= 0);
}

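/* Enable or disable a vertex attribute array for glthread's state tracking.
 * "vaobj" selects a VAO by name; NULL means the currently bound VAO.
 */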
void
_mesa_glthread_ClientState(struct gl_context *ctx, GLuint *vaobj,
                           gl_vert_attrib attrib, bool enable)
{
   /* The primitive restart client state uses a special value. */
   if (attrib == VERT_ATTRIB_PRIMITIVE_RESTART_NV) {
      ctx->GLThread.PrimitiveRestart = enable;
      update_primitive_restart(ctx);
      return;
   }

   if (attrib >= VERT_ATTRIB_MAX)
      return;

   struct glthread_vao *vao = get_vao(ctx, vaobj);
   if (!vao)
      return;

   const unsigned attrib_bit = 1u << attrib;

   if (enable && !(vao->UserEnabled & attrib_bit)) {
      vao->UserEnabled |= attrib_bit;

      /* The generic0 attribute supersedes the position attribute. We need to
       * update BufferEnabled accordingly.
       */
      if (attrib == VERT_ATTRIB_POS) {
         if (!(vao->UserEnabled & VERT_BIT_GENERIC0))
            enable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      } else {
         enable_buffer(vao, vao->Attrib[attrib].BufferIndex);

         if (attrib == VERT_ATTRIB_GENERIC0 && vao->UserEnabled & VERT_BIT_POS)
            disable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      }
   } else if (!enable && (vao->UserEnabled & attrib_bit)) {
      vao->UserEnabled &= ~attrib_bit;

      /* The generic0 attribute supersedes the position attribute. We need to
       * update BufferEnabled accordingly.
       */
      if (attrib == VERT_ATTRIB_POS) {
         if (!(vao->UserEnabled & VERT_BIT_GENERIC0))
            disable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      } else {
         disable_buffer(vao, vao->Attrib[attrib].BufferIndex);

         if (attrib == VERT_ATTRIB_GENERIC0 && vao->UserEnabled & VERT_BIT_POS)
            enable_buffer(vao, vao->Attrib[VERT_ATTRIB_POS].BufferIndex);
      }
   }

   /* The generic0 attribute supersedes the position attribute. */
   vao->Enabled = vao->UserEnabled;
   if (vao->Enabled & VERT_BIT_GENERIC0)
      vao->Enabled &= ~VERT_BIT_POS;
}

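/* Point an attribute at a different buffer binding, keeping the per-binding
 * enabled-attribute counts consistent if the attribute is enabled.
 */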
static void
set_attrib_binding(struct glthread_state *glthread, struct glthread_vao *vao,
                   gl_vert_attrib attrib, unsigned new_binding_index)
{
   unsigned old_binding_index = vao->Attrib[attrib].BufferIndex;

   if (old_binding_index != new_binding_index) {
      vao->Attrib[attrib].BufferIndex = new_binding_index;

      if (vao->Enabled & (1u << attrib)) {
         /* Update BufferEnabled. */
         enable_buffer(vao, new_binding_index);
         disable_buffer(vao, old_binding_index);
      }
   }
}

void _mesa_glthread_AttribDivisor(struct gl_context *ctx, const GLuint *vaobj,
                                  gl_vert_attrib attrib, GLuint divisor)
{
   if (attrib >= VERT_ATTRIB_MAX)
      return;

   struct glthread_vao *vao = get_vao(ctx, vaobj);
   if (!vao)
      return;

   vao->Attrib[attrib].Divisor = divisor;

   set_attrib_binding(&ctx->GLThread, vao, attrib, attrib);

   if (divisor)
      vao->NonZeroDivisorMask |= 1u << attrib;
   else
      vao->NonZeroDivisorMask &= ~(1u << attrib);
}

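/* Common code for the *AttribPointer entry points below: record the element
 * size, stride, and pointer (or buffer offset) of an attribute, and track
 * whether it sources from a user pointer (buffer 0).
 */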
static void
attrib_pointer(struct glthread_state *glthread, struct glthread_vao *vao,
               GLuint buffer, gl_vert_attrib attrib,
               GLint size, GLenum type, GLsizei stride,
               const void *pointer)
{
   if (attrib >= VERT_ATTRIB_MAX)
      return;

   unsigned elem_size = _mesa_bytes_per_vertex_attrib(size, type);

   vao->Attrib[attrib].ElementSize = elem_size;
   vao->Attrib[attrib].Stride = stride ? stride : elem_size;
   vao->Attrib[attrib].Pointer = pointer;
   vao->Attrib[attrib].RelativeOffset = 0;

   set_attrib_binding(glthread, vao, attrib, attrib);

   if (buffer != 0)
      vao->UserPointerMask &= ~(1u << attrib);
   else
      vao->UserPointerMask |= 1u << attrib;
}

void
_mesa_glthread_AttribPointer(struct gl_context *ctx, gl_vert_attrib attrib,
                             GLint size, GLenum type, GLsizei stride,
                             const void *pointer)
{
   struct glthread_state *glthread = &ctx->GLThread;

   attrib_pointer(glthread, glthread->CurrentVAO,
                  glthread->CurrentArrayBufferName,
                  attrib, size, type, stride, pointer);
}

void
_mesa_glthread_DSAAttribPointer(struct gl_context *ctx, GLuint vaobj,
                                GLuint buffer, gl_vert_attrib attrib,
                                GLint size, GLenum type, GLsizei stride,
                                GLintptr offset)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   vao = lookup_vao(ctx, vaobj);
   if (!vao)
      return;

   attrib_pointer(glthread, vao, buffer, attrib, size, type, stride,
                  (const void*)offset);
}

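/* Common code for the *AttribFormat entry points below: record the element
 * size and relative offset of a generic attribute.
 */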
static void
attrib_format(struct glthread_state *glthread, struct glthread_vao *vao,
              GLuint attribindex, GLint size, GLenum type,
              GLuint relativeoffset)
{
   if (attribindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned elem_size = _mesa_bytes_per_vertex_attrib(size, type);

   unsigned i = VERT_ATTRIB_GENERIC(attribindex);
   vao->Attrib[i].ElementSize = elem_size;
   vao->Attrib[i].RelativeOffset = relativeoffset;
}

void
_mesa_glthread_AttribFormat(struct gl_context *ctx, GLuint attribindex,
                            GLint size, GLenum type, GLuint relativeoffset)
{
   struct glthread_state *glthread = &ctx->GLThread;

   attrib_format(glthread, glthread->CurrentVAO, attribindex, size, type,
                 relativeoffset);
}

void
_mesa_glthread_DSAAttribFormat(struct gl_context *ctx, GLuint vaobj,
                               GLuint attribindex, GLint size, GLenum type,
                               GLuint relativeoffset)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      attrib_format(glthread, vao, attribindex, size, type, relativeoffset);
}

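/* Common code for the *VertexBuffer entry points below: record the buffer
 * offset and stride of a binding point and whether it sources from a user
 * pointer (buffer 0).
 */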
static void
bind_vertex_buffer(struct glthread_state *glthread, struct glthread_vao *vao,
                   GLuint bindingindex, GLuint buffer, GLintptr offset,
                   GLsizei stride)
{
   if (bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned i = VERT_ATTRIB_GENERIC(bindingindex);
   vao->Attrib[i].Pointer = (const void*)offset;
   vao->Attrib[i].Stride = stride;

   if (buffer != 0)
      vao->UserPointerMask &= ~(1u << i);
   else
      vao->UserPointerMask |= 1u << i;
}

void
_mesa_glthread_VertexBuffer(struct gl_context *ctx, GLuint bindingindex,
                            GLuint buffer, GLintptr offset, GLsizei stride)
{
   struct glthread_state *glthread = &ctx->GLThread;

   bind_vertex_buffer(glthread, glthread->CurrentVAO, bindingindex, buffer,
                      offset, stride);
}

void
_mesa_glthread_DSAVertexBuffer(struct gl_context *ctx, GLuint vaobj,
                               GLuint bindingindex, GLuint buffer,
                               GLintptr offset, GLsizei stride)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      bind_vertex_buffer(glthread, vao, bindingindex, buffer, offset, stride);
}

void
_mesa_glthread_DSAVertexBuffers(struct gl_context *ctx, GLuint vaobj,
                                GLuint first, GLsizei count,
                                const GLuint *buffers,
                                const GLintptr *offsets,
                                const GLsizei *strides)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao;

   vao = lookup_vao(ctx, vaobj);
   if (!vao)
      return;

   for (unsigned i = 0; i < count; i++) {
      bind_vertex_buffer(glthread, vao, first + i, buffers[i], offsets[i],
                         strides[i]);
   }
}

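/* Common code for the *BindingDivisor entry points below: record the instance
 * divisor of a binding point and track which bindings have a non-zero divisor.
 */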
static void
binding_divisor(struct glthread_state *glthread, struct glthread_vao *vao,
                GLuint bindingindex, GLuint divisor)
{
   if (bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   unsigned i = VERT_ATTRIB_GENERIC(bindingindex);
   vao->Attrib[i].Divisor = divisor;

   if (divisor)
      vao->NonZeroDivisorMask |= 1u << i;
   else
      vao->NonZeroDivisorMask &= ~(1u << i);
}

void
_mesa_glthread_BindingDivisor(struct gl_context *ctx, GLuint bindingindex,
                              GLuint divisor)
{
   struct glthread_state *glthread = &ctx->GLThread;

   binding_divisor(glthread, glthread->CurrentVAO, bindingindex, divisor);
}

void
_mesa_glthread_DSABindingDivisor(struct gl_context *ctx, GLuint vaobj,
                                 GLuint bindingindex, GLuint divisor)
{
   struct glthread_state *glthread = &ctx->GLThread;
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      binding_divisor(glthread, vao, bindingindex, divisor);
}

void
_mesa_glthread_AttribBinding(struct gl_context *ctx, GLuint attribindex,
                             GLuint bindingindex)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (attribindex >= VERT_ATTRIB_GENERIC_MAX ||
       bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   set_attrib_binding(glthread, glthread->CurrentVAO,
                      VERT_ATTRIB_GENERIC(attribindex),
                      VERT_ATTRIB_GENERIC(bindingindex));
}

void
_mesa_glthread_DSAAttribBinding(struct gl_context *ctx, GLuint vaobj,
                                GLuint attribindex, GLuint bindingindex)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (attribindex >= VERT_ATTRIB_GENERIC_MAX ||
       bindingindex >= VERT_ATTRIB_GENERIC_MAX)
      return;

   struct glthread_vao *vao = lookup_vao(ctx, vaobj);
   if (vao) {
      set_attrib_binding(glthread, vao,
                         VERT_ATTRIB_GENERIC(attribindex),
                         VERT_ATTRIB_GENERIC(bindingindex));
   }
}

void
_mesa_glthread_DSAElementBuffer(struct gl_context *ctx, GLuint vaobj,
                                GLuint buffer)
{
   struct glthread_vao *vao = lookup_vao(ctx, vaobj);

   if (vao)
      vao->CurrentElementBufferName = buffer;
}

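/* Track glPushClientAttrib. When GL_CLIENT_VERTEX_ARRAY_BIT is included in
 * the mask, the current VAO contents and related client state are saved on
 * glthread's client-attrib stack so that the matching pop can restore them.
 */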
void
_mesa_glthread_PushClientAttrib(struct gl_context *ctx, GLbitfield mask,
                                bool set_default)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (glthread->ClientAttribStackTop >= MAX_CLIENT_ATTRIB_STACK_DEPTH)
      return;

   struct glthread_client_attrib *top =
      &glthread->ClientAttribStack[glthread->ClientAttribStackTop];

   if (mask & GL_CLIENT_VERTEX_ARRAY_BIT) {
      top->VAO = *glthread->CurrentVAO;
      top->CurrentArrayBufferName = glthread->CurrentArrayBufferName;
      top->ClientActiveTexture = glthread->ClientActiveTexture;
      top->RestartIndex = glthread->RestartIndex;
      top->PrimitiveRestart = glthread->PrimitiveRestart;
      top->PrimitiveRestartFixedIndex = glthread->PrimitiveRestartFixedIndex;
      top->Valid = true;
   } else {
      top->Valid = false;
   }

   glthread->ClientAttribStackTop++;

   if (set_default)
      _mesa_glthread_ClientAttribDefault(ctx, mask);
}

void
_mesa_glthread_PopClientAttrib(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (glthread->ClientAttribStackTop == 0)
      return;

   glthread->ClientAttribStackTop--;

   struct glthread_client_attrib *top =
      &glthread->ClientAttribStack[glthread->ClientAttribStackTop];

   if (!top->Valid)
      return;

   /* Popping a deleted VAO is an error. */
   struct glthread_vao *vao = NULL;
   if (top->VAO.Name) {
      vao = lookup_vao(ctx, top->VAO.Name);
      if (!vao)
         return;
   }

   /* Restore states. */
   glthread->CurrentArrayBufferName = top->CurrentArrayBufferName;
   glthread->ClientActiveTexture = top->ClientActiveTexture;
   glthread->RestartIndex = top->RestartIndex;
   glthread->PrimitiveRestart = top->PrimitiveRestart;
   glthread->PrimitiveRestartFixedIndex = top->PrimitiveRestartFixedIndex;

   if (!vao)
      vao = &glthread->DefaultVAO;

   assert(top->VAO.Name == vao->Name);
   *vao = top->VAO; /* Copy all fields. */
   glthread->CurrentVAO = vao;
}

void
_mesa_glthread_ClientAttribDefault(struct gl_context *ctx, GLbitfield mask)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!(mask & GL_CLIENT_VERTEX_ARRAY_BIT))
      return;

   glthread->CurrentArrayBufferName = 0;
   glthread->ClientActiveTexture = 0;
   glthread->RestartIndex = 0;
   glthread->PrimitiveRestart = false;
   glthread->PrimitiveRestartFixedIndex = false;
   glthread->CurrentVAO = &glthread->DefaultVAO;
   _mesa_glthread_reset_vao(glthread->CurrentVAO);
}

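/* Track glInterleavedArrays: look up the interleaved layout for the format
 * and enable/point the texcoord, color, normal, and position arrays
 * accordingly.
 */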
void
_mesa_glthread_InterleavedArrays(struct gl_context *ctx, GLenum format,
                                 GLsizei stride, const GLvoid *pointer)
{
   struct gl_interleaved_layout layout;
   unsigned tex = VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture);

   if (stride < 0 || !_mesa_get_interleaved_layout(format, &layout))
      return;

   if (!stride)
      stride = layout.defstride;

   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_EDGEFLAG, false);
   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR_INDEX, false);
   /* XXX also disable secondary color and generic arrays? */

   /* Texcoords */
   if (layout.tflag) {
      _mesa_glthread_ClientState(ctx, NULL, tex, true);
      _mesa_glthread_AttribPointer(ctx, tex, layout.tcomps, GL_FLOAT, stride,
                                   (GLubyte *) pointer + layout.toffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, tex, false);
   }

   /* Color */
   if (layout.cflag) {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR0, true);
      _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_COLOR0, layout.ccomps,
                                   layout.ctype, stride,
                                   (GLubyte *) pointer + layout.coffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_COLOR0, false);
   }

   /* Normals */
   if (layout.nflag) {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_NORMAL, true);
      _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_NORMAL, 3, GL_FLOAT,
                                   stride, (GLubyte *) pointer + layout.noffset);
   } else {
      _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_NORMAL, false);
   }

   /* Vertices */
   _mesa_glthread_ClientState(ctx, NULL, VERT_ATTRIB_POS, true);
   _mesa_glthread_AttribPointer(ctx, VERT_ATTRIB_POS, layout.vcomps, GL_FLOAT,
                                stride, (GLubyte *) pointer + layout.voffset);
}