1 /*
2 * Copyright © 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /* Draw function marshalling for glthread.
25 *
26 * The purpose of these glDraw wrappers is to upload non-VBO vertex and
27 * index data, so that glthread doesn't have to execute synchronously.
28 */
29
30 #include "c99_alloca.h"
31
32 #include "api_exec_decl.h"
33 #include "main/glthread_marshal.h"
34 #include "main/dispatch.h"
35 #include "main/varray.h"
36
37 static inline unsigned
38 get_index_size(GLenum type)
39 {
40 return 1 << _mesa_get_index_size_shift(type);
41 }
42
43 static inline GLindextype
44 encode_index_type(GLenum type)
45 {
46 /* Map invalid values less than GL_UNSIGNED_BYTE to GL_UNSIGNED_BYTE - 1,
47 * and invalid values greater than GL_UNSIGNED_INT to GL_UNSIGNED_INT + 1.
48 * Then subtract GL_UNSIGNED_BYTE - 1. Final encoding:
49 * 0 = invalid value
50 * 1 = GL_UNSIGNED_BYTE
51 * 2 = invalid value
52 * 3 = GL_UNSIGNED_SHORT
53 * 4 = invalid value
54 * 5 = GL_UNSIGNED_INT
55 * 6 = invalid value
56 */
57 const unsigned min = GL_UNSIGNED_BYTE - 1;
58 const unsigned max = GL_UNSIGNED_INT + 1;
59 return (GLindextype){CLAMP(type, min, max) - min};
60 }
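/* Worked example of the encoding above: with min = 0x1400 and max = 0x1406,
 * GL_UNSIGNED_BYTE (0x1401) -> 1, GL_UNSIGNED_SHORT (0x1403) -> 3,
 * GL_UNSIGNED_INT (0x1405) -> 5, and any enum outside [0x1400, 0x1406]
 * clamps to 0 or 6, which decode as invalid. _mesa_decode_index_type()
 * (used by the unmarshal functions below) reverses the mapping for the
 * valid values.
 */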
61
62 static ALWAYS_INLINE struct gl_buffer_object *
63 upload_indices(struct gl_context *ctx, unsigned count, unsigned index_size,
64 const GLvoid **indices)
65 {
66 struct gl_buffer_object *upload_buffer = NULL;
67 unsigned upload_offset = 0;
68
69 assert(count);
70
71 _mesa_glthread_upload(ctx, *indices, index_size * count,
72 &upload_offset, &upload_buffer, NULL, 0);
73 *indices = (const GLvoid*)(intptr_t)upload_offset;
74
75 if (!upload_buffer)
76 _mesa_marshal_InternalSetError(GL_OUT_OF_MEMORY);
77
78 return upload_buffer;
79 }
80
81 static ALWAYS_INLINE struct gl_buffer_object *
82 upload_multi_indices(struct gl_context *ctx, unsigned total_count,
83 unsigned index_size, unsigned draw_count,
84 const GLsizei *count, const GLvoid *const *indices,
85 const GLvoid **out_indices)
86 {
87 struct gl_buffer_object *upload_buffer = NULL;
88 unsigned upload_offset = 0;
89 uint8_t *upload_ptr = NULL;
90
91 assert(total_count);
92
93 _mesa_glthread_upload(ctx, NULL, index_size * total_count,
94 &upload_offset, &upload_buffer, &upload_ptr, 0);
95 if (!upload_buffer) {
96 _mesa_marshal_InternalSetError(GL_OUT_OF_MEMORY);
97 return NULL;
98 }
99
100 for (unsigned i = 0, offset = 0; i < draw_count; i++) {
101 if (!count[i]) {
102 /* Set some valid value so as not to leave it uninitialized. */
103 out_indices[i] = (const GLvoid*)(intptr_t)upload_offset;
104 continue;
105 }
106
107 unsigned size = count[i] * index_size;
108
109 memcpy(upload_ptr + offset, indices[i], size);
110 out_indices[i] = (const GLvoid*)(intptr_t)(upload_offset + offset);
111 offset += size;
112 }
113
114 return upload_buffer;
115 }
116
117 static ALWAYS_INLINE bool
118 upload_vertices(struct gl_context *ctx, unsigned user_buffer_mask,
119 unsigned start_vertex, unsigned num_vertices,
120 unsigned start_instance, unsigned num_instances,
121 struct gl_buffer_object **buffers, int *offsets)
122 {
123 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
124 unsigned attrib_mask_iter = vao->Enabled;
125 unsigned num_buffers = 0;
126
127 assert((num_vertices || !(user_buffer_mask & ~vao->NonZeroDivisorMask)) &&
128 (num_instances || !(user_buffer_mask & vao->NonZeroDivisorMask)));
129
130 if (unlikely(vao->BufferInterleaved & user_buffer_mask)) {
131 /* Slower upload path where some buffers reference multiple attribs,
132 * so we have to use 2 while loops instead of 1.
133 */
134 unsigned start_offset[VERT_ATTRIB_MAX];
135 unsigned end_offset[VERT_ATTRIB_MAX];
136 uint32_t buffer_mask = 0;
137
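/* Pass 1: for each user buffer binding, merge the byte ranges referenced by
 * all attribs sharing that binding into a single [start, end) range.
 * Pass 2 below then uploads each merged range exactly once.
 */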
138 while (attrib_mask_iter) {
139 unsigned i = u_bit_scan(&attrib_mask_iter);
140 unsigned binding_index = vao->Attrib[i].BufferIndex;
141
142 if (!(user_buffer_mask & (1 << binding_index)))
143 continue;
144
145 unsigned stride = vao->Attrib[binding_index].Stride;
146 unsigned instance_div = vao->Attrib[binding_index].Divisor;
147 unsigned element_size = vao->Attrib[i].ElementSize;
148 unsigned offset = vao->Attrib[i].RelativeOffset;
149 unsigned size;
150
151 if (instance_div) {
152 /* Per-instance attrib. */
153
154 /* Figure out how many instances we'll render given instance_div. We
155 * can't use the typical div_round_up() pattern because the CTS uses
156 * instance_div = ~0 for a test, which overflows div_round_up()'s
157 * addition.
158 */
159 unsigned count = num_instances / instance_div;
160 if (count * instance_div != num_instances)
161 count++;
162
163 offset += stride * start_instance;
164 size = stride * (count - 1) + element_size;
165 } else {
166 /* Per-vertex attrib. */
167 offset += stride * start_vertex;
168 size = stride * (num_vertices - 1) + element_size;
169 }
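/* Example with assumed numbers: stride = 16, element_size = 8,
 * start_vertex = 10, num_vertices = 3 gives offset += 160 and
 * size = 16 * 2 + 8 = 40, i.e. the range ends at the last byte the draw
 * can actually read rather than at a full trailing stride.
 */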
170
171 unsigned binding_index_bit = 1u << binding_index;
172
173 /* Update upload offsets. */
174 if (!(buffer_mask & binding_index_bit)) {
175 start_offset[binding_index] = offset;
176 end_offset[binding_index] = offset + size;
177 } else {
178 if (offset < start_offset[binding_index])
179 start_offset[binding_index] = offset;
180 if (offset + size > end_offset[binding_index])
181 end_offset[binding_index] = offset + size;
182 }
183
184 buffer_mask |= binding_index_bit;
185 }
186
187 /* Upload buffers. */
188 while (buffer_mask) {
189 struct gl_buffer_object *upload_buffer = NULL;
190 unsigned upload_offset = 0;
191 unsigned start, end;
192
193 unsigned binding_index = u_bit_scan(&buffer_mask);
194
195 start = start_offset[binding_index];
196 end = end_offset[binding_index];
197 assert(start < end);
198
199 /* If the draw start index is non-zero, glthread can upload to offset 0,
200 * which means the attrib offset has to be -(first * stride).
201 * So use signed vertex buffer offsets when possible to save memory.
202 */
203 const void *ptr = vao->Attrib[binding_index].Pointer;
204 _mesa_glthread_upload(ctx, (uint8_t*)ptr + start,
205 end - start, &upload_offset,
206 &upload_buffer, NULL, ctx->Const.VertexBufferOffsetIsInt32 ? 0 : start);
207 if (!upload_buffer) {
208 for (unsigned i = 0; i < num_buffers; i++)
209 _mesa_reference_buffer_object(ctx, &buffers[i], NULL);
210
211 _mesa_marshal_InternalSetError(GL_OUT_OF_MEMORY);
212 return false;
213 }
214
215 buffers[num_buffers] = upload_buffer;
216 offsets[num_buffers] = upload_offset - start;
217 num_buffers++;
218 }
219
220 return true;
221 }
222
223 /* Faster path where all attribs are separate. */
224 while (attrib_mask_iter) {
225 unsigned i = u_bit_scan(&attrib_mask_iter);
226 unsigned binding_index = vao->Attrib[i].BufferIndex;
227
228 if (!(user_buffer_mask & (1 << binding_index)))
229 continue;
230
231 struct gl_buffer_object *upload_buffer = NULL;
232 unsigned upload_offset = 0;
233 unsigned stride = vao->Attrib[binding_index].Stride;
234 unsigned instance_div = vao->Attrib[binding_index].Divisor;
235 unsigned element_size = vao->Attrib[i].ElementSize;
236 unsigned offset = vao->Attrib[i].RelativeOffset;
237 unsigned size;
238
239 if (instance_div) {
240 /* Per-instance attrib. */
241
242 /* Figure out how many instances we'll render given instance_div. We
243 * can't use the typical div_round_up() pattern because the CTS uses
244 * instance_div = ~0 for a test, which overflows div_round_up()'s
245 * addition.
246 */
247 unsigned count = num_instances / instance_div;
248 if (count * instance_div != num_instances)
249 count++;
250
251 offset += stride * start_instance;
252 size = stride * (count - 1) + element_size;
253 } else {
254 /* Per-vertex attrib. */
255 offset += stride * start_vertex;
256 size = stride * (num_vertices - 1) + element_size;
257 }
258
259 /* If the draw start index is non-zero, glthread can upload to offset 0,
260 * which means the attrib offset has to be -(first * stride).
261 * So use signed vertex buffer offsets when possible to save memory.
262 */
263 const void *ptr = vao->Attrib[binding_index].Pointer;
264 _mesa_glthread_upload(ctx, (uint8_t*)ptr + offset,
265 size, &upload_offset, &upload_buffer, NULL,
266 ctx->Const.VertexBufferOffsetIsInt32 ? 0 : offset);
267 if (!upload_buffer) {
268 for (unsigned i = 0; i < num_buffers; i++)
269 _mesa_reference_buffer_object(ctx, &buffers[i], NULL);
270
271 _mesa_marshal_InternalSetError(GL_OUT_OF_MEMORY);
272 return false;
273 }
274
275 buffers[num_buffers] = upload_buffer;
276 offsets[num_buffers] = upload_offset - offset;
277 num_buffers++;
278 }
279
280 return true;
281 }
282
283 /* DrawArraysInstanced without user buffers. */
284 uint32_t
285 _mesa_unmarshal_DrawArraysInstanced(struct gl_context *ctx,
286 const struct marshal_cmd_DrawArraysInstanced *restrict cmd)
287 {
288 const GLenum mode = cmd->mode;
289 const GLint first = cmd->first;
290 const GLsizei count = cmd->count;
291 const GLsizei instance_count = cmd->primcount;
292
293 CALL_DrawArraysInstanced(ctx->Dispatch.Current, (mode, first, count, instance_count));
294 return align(sizeof(*cmd), 8) / 8;
295 }
296
297 struct marshal_cmd_DrawArraysInstancedBaseInstanceDrawID
298 {
299 struct marshal_cmd_base cmd_base;
300 GLenum8 mode;
301 GLint first;
302 GLsizei count;
303 GLsizei instance_count;
304 GLuint baseinstance;
305 GLuint drawid;
306 };
307
308 uint32_t
309 _mesa_unmarshal_DrawArraysInstancedBaseInstanceDrawID(struct gl_context *ctx,
310 const struct marshal_cmd_DrawArraysInstancedBaseInstanceDrawID *cmd)
311 {
312 const GLenum mode = cmd->mode;
313 const GLint first = cmd->first;
314 const GLsizei count = cmd->count;
315 const GLsizei instance_count = cmd->instance_count;
316 const GLuint baseinstance = cmd->baseinstance;
317
318 ctx->DrawID = cmd->drawid;
319 CALL_DrawArraysInstancedBaseInstance(ctx->Dispatch.Current,
320 (mode, first, count, instance_count,
321 baseinstance));
322 ctx->DrawID = 0;
323 return align(sizeof(*cmd), 8) / 8;
324 }
325
326 /* DrawArraysInstancedBaseInstance with user buffers. */
327 struct marshal_cmd_DrawArraysUserBuf
328 {
329 struct marshal_cmd_base cmd_base;
330 GLenum8 mode;
331 uint16_t num_slots;
332 GLint first;
333 GLsizei count;
334 GLsizei instance_count;
335 GLuint baseinstance;
336 GLuint drawid;
337 GLuint user_buffer_mask;
338 };
339
340 uint32_t
341 _mesa_unmarshal_DrawArraysUserBuf(struct gl_context *ctx,
342 const struct marshal_cmd_DrawArraysUserBuf *restrict cmd)
343 {
344 const GLuint user_buffer_mask = cmd->user_buffer_mask;
345
346 /* Bind uploaded buffers if needed. */
347 if (user_buffer_mask) {
348 struct gl_buffer_object **buffers = (struct gl_buffer_object **)(cmd + 1);
349 const int *offsets = (const int *)(buffers + util_bitcount(user_buffer_mask));
350
351 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets, user_buffer_mask);
352 }
353
354 const GLenum mode = cmd->mode;
355 const GLint first = cmd->first;
356 const GLsizei count = cmd->count;
357 const GLsizei instance_count = cmd->instance_count;
358 const GLuint baseinstance = cmd->baseinstance;
359
360 ctx->DrawID = cmd->drawid;
361 CALL_DrawArraysInstancedBaseInstance(ctx->Dispatch.Current,
362 (mode, first, count, instance_count,
363 baseinstance));
364 ctx->DrawID = 0;
365 return cmd->num_slots;
366 }
367
368 static inline unsigned
369 get_user_buffer_mask(struct gl_context *ctx)
370 {
371 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
372
373 /* BufferEnabled means which attribs are enabled in terms of buffer
374 * binding slots (not attrib slots).
375 *
376 * UserPointerMask means which buffer bindings don't have a buffer bound.
377 *
378 * NonNullPointerMask means which buffer bindings have a non-NULL pointer.
379 * Bindings with a NULL pointer are not uploaded. This can happen when an
380 * attrib is enabled but the shader doesn't use it, so mesa/state_tracker ignores it.
381 */
382 return vao->BufferEnabled & vao->UserPointerMask & vao->NonNullPointerMask;
383 }
384
385 static ALWAYS_INLINE void
386 draw_arrays(GLuint drawid, GLenum mode, GLint first, GLsizei count,
387 GLsizei instance_count, GLuint baseinstance,
388 bool compiled_into_dlist, bool no_error)
389 {
390 GET_CURRENT_CONTEXT(ctx);
391
392 /* The main benefit of no_error is that we can discard no-op draws
393 * immediately.
394 */
395 if (no_error && (count <= 0 || instance_count <= 0))
396 return;
397
398 if (unlikely(compiled_into_dlist && ctx->GLThread.ListMode)) {
399 _mesa_glthread_finish_before(ctx, "DrawArrays");
400 /* Use the function that's compiled into a display list. */
401 CALL_DrawArrays(ctx->Dispatch.Current, (mode, first, count));
402 return;
403 }
404
405 unsigned user_buffer_mask =
406 _mesa_is_desktop_gl_core(ctx) ? 0 : get_user_buffer_mask(ctx);
407
408 /* Fast path when nothing needs to be done.
409 *
410 * This is also an error path. Zero counts should still call the driver
411 * for possible GL errors.
412 */
413 if (!user_buffer_mask ||
414 (!no_error &&
415 (count <= 0 || instance_count <= 0 || /* GL_INVALID_VALUE / no-op */
416 ctx->GLThread.inside_begin_end || /* GL_INVALID_OPERATION */
417 ctx->Dispatch.Current == ctx->Dispatch.ContextLost || /* GL_INVALID_OPERATION */
418 ctx->GLThread.ListMode))) { /* GL_INVALID_OPERATION */
419 if (baseinstance == 0 && drawid == 0) {
420 int cmd_size = sizeof(struct marshal_cmd_DrawArraysInstanced);
421 struct marshal_cmd_DrawArraysInstanced *cmd =
422 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysInstanced, cmd_size);
423
424 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
425 cmd->first = first;
426 cmd->count = count;
427 cmd->primcount = instance_count;
428 } else {
429 int cmd_size = sizeof(struct marshal_cmd_DrawArraysInstancedBaseInstanceDrawID);
430 struct marshal_cmd_DrawArraysInstancedBaseInstanceDrawID *cmd =
431 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysInstancedBaseInstanceDrawID, cmd_size);
432
433 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
434 cmd->first = first;
435 cmd->count = count;
436 cmd->instance_count = instance_count;
437 cmd->baseinstance = baseinstance;
438 cmd->drawid = drawid;
439 }
440 return;
441 }
442
443 /* Upload and draw. */
444 struct gl_buffer_object *buffers[VERT_ATTRIB_MAX];
445 int offsets[VERT_ATTRIB_MAX];
446
447 if (!upload_vertices(ctx, user_buffer_mask, first, count, baseinstance,
448 instance_count, buffers, offsets))
449 return; /* the error is set by upload_vertices */
450
451 unsigned num_buffers = util_bitcount(user_buffer_mask);
452 int buffers_size = num_buffers * sizeof(buffers[0]);
453 int offsets_size = num_buffers * sizeof(int);
454 int cmd_size = sizeof(struct marshal_cmd_DrawArraysUserBuf) +
455 buffers_size + offsets_size;
456 struct marshal_cmd_DrawArraysUserBuf *cmd;
457
458 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysUserBuf,
459 cmd_size);
460 cmd->num_slots = align(cmd_size, 8) / 8;
461 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
462 cmd->first = first;
463 cmd->count = count;
464 cmd->instance_count = instance_count;
465 cmd->baseinstance = baseinstance;
466 cmd->drawid = drawid;
467 cmd->user_buffer_mask = user_buffer_mask;
468
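/* Variable-length payload appended to the command: the gl_buffer_object
 * pointers first, then the int offsets, in the same order that
 * _mesa_unmarshal_DrawArraysUserBuf reads them back.
 */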
469 if (user_buffer_mask) {
470 char *variable_data = (char*)(cmd + 1);
471 memcpy(variable_data, buffers, buffers_size);
472 variable_data += buffers_size;
473 memcpy(variable_data, offsets, offsets_size);
474 }
475 }
476
477 /* MultiDrawArrays with user buffers. */
478 struct marshal_cmd_MultiDrawArraysUserBuf
479 {
480 struct marshal_cmd_base cmd_base;
481 GLenum8 mode;
482 uint16_t num_slots;
483 GLsizei draw_count;
484 GLuint user_buffer_mask;
485 };
486
487 uint32_t
488 _mesa_unmarshal_MultiDrawArraysUserBuf(struct gl_context *ctx,
489 const struct marshal_cmd_MultiDrawArraysUserBuf *restrict cmd)
490 {
491 const GLenum mode = cmd->mode;
492 const GLsizei draw_count = cmd->draw_count;
493 const GLsizei real_draw_count = MAX2(draw_count, 0);
494 const GLuint user_buffer_mask = cmd->user_buffer_mask;
495
496 const char *variable_data = (const char *)(cmd + 1);
497 const GLint *first = (GLint *)variable_data;
498 variable_data += sizeof(GLint) * real_draw_count;
499 const GLsizei *count = (GLsizei *)variable_data;
500
501 /* Bind uploaded buffers if needed. */
502 if (user_buffer_mask) {
503 variable_data += sizeof(GLsizei) * real_draw_count;
504 const int *offsets = (const int *)variable_data;
505 variable_data += sizeof(int) * util_bitcount(user_buffer_mask);
506
507 /* Align for pointers. */
508 if ((uintptr_t)variable_data % sizeof(uintptr_t))
509 variable_data += 4;
510
511 struct gl_buffer_object **buffers = (struct gl_buffer_object **)variable_data;
512
513 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets, user_buffer_mask);
514 }
515
516 CALL_MultiDrawArrays(ctx->Dispatch.Current,
517 (mode, first, count, draw_count));
518 return cmd->num_slots;
519 }
520
521 void GLAPIENTRY
522 _mesa_marshal_MultiDrawArrays(GLenum mode, const GLint *first,
523 const GLsizei *count, GLsizei draw_count)
524 {
525 GET_CURRENT_CONTEXT(ctx);
526
527 if (unlikely(ctx->GLThread.ListMode)) {
528 _mesa_glthread_finish_before(ctx, "MultiDrawArrays");
529 CALL_MultiDrawArrays(ctx->Dispatch.Current,
530 (mode, first, count, draw_count));
531 return;
532 }
533
534 struct gl_buffer_object *buffers[VERT_ATTRIB_MAX];
535 int offsets[VERT_ATTRIB_MAX];
536 unsigned user_buffer_mask =
537 _mesa_is_desktop_gl_core(ctx) || draw_count <= 0 ||
538 ctx->Dispatch.Current == ctx->Dispatch.ContextLost ||
539 ctx->GLThread.inside_begin_end ? 0 : get_user_buffer_mask(ctx);
540
541 if (user_buffer_mask) {
542 unsigned min_index = ~0;
543 unsigned max_index_exclusive = 0;
544
545 for (int i = 0; i < draw_count; i++) {
546 GLsizei vertex_count = count[i];
547
548 if (vertex_count < 0) {
549 /* This will just call the driver to set the GL error. */
550 min_index = ~0;
551 break;
552 }
553 if (vertex_count == 0)
554 continue;
555
556 min_index = MIN2(min_index, first[i]);
557 max_index_exclusive = MAX2(max_index_exclusive, first[i] + vertex_count);
558 }
559
560 if (min_index >= max_index_exclusive) {
561 /* Nothing to do, but call the driver to set possible GL errors. */
562 user_buffer_mask = 0;
563 } else {
564 /* Upload. */
565 unsigned num_vertices = max_index_exclusive - min_index;
566
567 if (!upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
568 0, 1, buffers, offsets))
569 return; /* the error is set by upload_vertices */
570 }
571 }
572
573 /* Add the call into the batch buffer. */
574 int real_draw_count = MAX2(draw_count, 0);
575 int first_size = sizeof(GLint) * real_draw_count;
576 int count_size = sizeof(GLsizei) * real_draw_count;
577 unsigned num_buffers = util_bitcount(user_buffer_mask);
578 int buffers_size = num_buffers * sizeof(buffers[0]);
579 int offsets_size = num_buffers * sizeof(int);
580 int cmd_size = sizeof(struct marshal_cmd_MultiDrawArraysUserBuf) +
581 first_size + count_size + buffers_size + offsets_size;
582 struct marshal_cmd_MultiDrawArraysUserBuf *cmd;
583
584 /* Make sure cmd can fit in the batch buffer */
585 if (cmd_size <= MARSHAL_MAX_CMD_SIZE) {
586 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawArraysUserBuf,
587 cmd_size);
588 cmd->num_slots = align(cmd_size, 8) / 8;
589 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
590 cmd->draw_count = draw_count;
591 cmd->user_buffer_mask = user_buffer_mask;
592
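/* Payload layout (must match _mesa_unmarshal_MultiDrawArraysUserBuf):
 * first[], count[], then for user buffers: offsets[], pointer-size
 * alignment padding, buffers[].
 */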
593 char *variable_data = (char*)(cmd + 1);
594 memcpy(variable_data, first, first_size);
595 variable_data += first_size;
596 memcpy(variable_data, count, count_size);
597
598 if (user_buffer_mask) {
599 variable_data += count_size;
600 memcpy(variable_data, offsets, offsets_size);
601 variable_data += offsets_size;
602
603 /* Align for pointers. */
604 if ((uintptr_t)variable_data % sizeof(uintptr_t))
605 variable_data += 4;
606
607 memcpy(variable_data, buffers, buffers_size);
608 }
609 } else {
610 /* The call is too large, so sync and execute the unmarshal code here. */
611 _mesa_glthread_finish_before(ctx, "MultiDrawArrays");
612
613 if (user_buffer_mask) {
614 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets,
615 user_buffer_mask);
616 }
617
618 CALL_MultiDrawArrays(ctx->Dispatch.Current,
619 (mode, first, count, draw_count));
620 }
621 }
622
623 uint32_t
624 _mesa_unmarshal_DrawElements(struct gl_context *ctx,
625 const struct marshal_cmd_DrawElements *restrict cmd)
626 {
627 const GLenum mode = cmd->mode;
628 const GLsizei count = cmd->count;
629 const GLenum type = _mesa_decode_index_type(cmd->type);
630 const GLvoid *indices = cmd->indices;
631
632 CALL_DrawElements(ctx->Dispatch.Current, (mode, count, type, indices));
633 return align(sizeof(*cmd), 8) / 8;
634 }
635
636 uint32_t
637 _mesa_unmarshal_DrawElementsPacked(struct gl_context *ctx,
638 const struct marshal_cmd_DrawElementsPacked *restrict cmd)
639 {
640 const GLenum mode = cmd->mode;
641 const GLsizei count = cmd->count;
642 const GLenum type = _mesa_decode_index_type(cmd->type);
643 const GLvoid *indices = (void*)(uintptr_t)cmd->indices;
644
645 CALL_DrawElements(ctx->Dispatch.Current, (mode, count, type, indices));
646 return align(sizeof(*cmd), 8) / 8;
647 }
648
649 uint32_t
650 _mesa_unmarshal_DrawElementsInstancedBaseVertex(struct gl_context *ctx,
651 const struct marshal_cmd_DrawElementsInstancedBaseVertex *restrict cmd)
652 {
653 const GLenum mode = cmd->mode;
654 const GLsizei count = cmd->count;
655 const GLenum type = _mesa_decode_index_type(cmd->type);
656 const GLvoid *indices = cmd->indices;
657 const GLsizei instance_count = cmd->primcount;
658 const GLint basevertex = cmd->basevertex;
659
660 CALL_DrawElementsInstancedBaseVertex(ctx->Dispatch.Current,
661 (mode, count, type, indices,
662 instance_count, basevertex));
663 return align(sizeof(*cmd), 8) / 8;
664 }
665
666 uint32_t
667 _mesa_unmarshal_DrawElementsInstancedBaseInstance(struct gl_context *ctx,
668 const struct marshal_cmd_DrawElementsInstancedBaseInstance *restrict cmd)
669 {
670 const GLenum mode = cmd->mode;
671 const GLsizei count = cmd->count;
672 const GLenum type = _mesa_decode_index_type(cmd->type);
673 const GLvoid *indices = cmd->indices;
674 const GLsizei instance_count = cmd->primcount;
675 const GLint baseinstance = cmd->baseinstance;
676
677 CALL_DrawElementsInstancedBaseInstance(ctx->Dispatch.Current,
678 (mode, count, type, indices,
679 instance_count, baseinstance));
680 return align(sizeof(*cmd), 8) / 8;
681 }
682
683 uint32_t
684 _mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstanceDrawID(struct gl_context *ctx,
685 const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstanceDrawID *restrict cmd)
686 {
687 const GLenum mode = cmd->mode;
688 const GLsizei count = cmd->count;
689 const GLenum type = _mesa_decode_index_type(cmd->type);
690 const GLvoid *indices = cmd->indices;
691 const GLsizei instance_count = cmd->instance_count;
692 const GLint basevertex = cmd->basevertex;
693 const GLuint baseinstance = cmd->baseinstance;
694
695 ctx->DrawID = cmd->drawid;
696 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->Dispatch.Current,
697 (mode, count, type, indices,
698 instance_count, basevertex,
699 baseinstance));
700 ctx->DrawID = 0;
701
702 return align(sizeof(*cmd), 8) / 8;
703 }
704
705 uint32_t
706 _mesa_unmarshal_DrawElementsUserBuf(struct gl_context *ctx,
707 const struct marshal_cmd_DrawElementsUserBuf *restrict cmd)
708 {
709 const GLuint user_buffer_mask = cmd->user_buffer_mask;
710
711 /* Bind uploaded buffers if needed. */
712 if (user_buffer_mask) {
713 struct gl_buffer_object **buffers = (struct gl_buffer_object **)(cmd + 1);
714 const int *offsets = (const int *)(buffers + util_bitcount(user_buffer_mask));
715
716 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets, user_buffer_mask);
717 }
718
719 /* Draw. */
720 CALL_DrawElementsUserBuf(ctx->Dispatch.Current, (cmd));
721
722 struct gl_buffer_object *index_buffer = cmd->index_buffer;
723 _mesa_reference_buffer_object(ctx, &index_buffer, NULL);
724 return cmd->num_slots;
725 }
726
727 uint32_t
728 _mesa_unmarshal_DrawElementsUserBufPacked(struct gl_context *ctx,
729 const struct marshal_cmd_DrawElementsUserBufPacked *restrict cmd)
730 {
731 const GLuint user_buffer_mask = cmd->user_buffer_mask;
732
733 /* Bind uploaded buffers if needed. */
734 if (user_buffer_mask) {
735 struct gl_buffer_object **buffers = (struct gl_buffer_object **)(cmd + 1);
736 const int *offsets = (const int *)(buffers + util_bitcount(user_buffer_mask));
737
738 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets, user_buffer_mask);
739 }
740
741 /* Draw. */
742 CALL_DrawElementsUserBufPacked(ctx->Dispatch.Current, (cmd));
743
744 struct gl_buffer_object *index_buffer = cmd->index_buffer;
745 _mesa_reference_buffer_object(ctx, &index_buffer, NULL);
746 return cmd->num_slots;
747 }
748
749 static inline bool
750 should_convert_to_begin_end(struct gl_context *ctx, unsigned count,
751 unsigned num_upload_vertices,
752 unsigned instance_count, struct glthread_vao *vao)
753 {
754 /* Some of these are limitations of _mesa_glthread_UnrollDrawElements.
755 * Others prevent syncing, such as disallowing buffer objects because we
756 * can't map them without syncing.
757 */
758 return ctx->API == API_OPENGL_COMPAT &&
759 util_is_vbo_upload_ratio_too_large(count, num_upload_vertices) &&
760 instance_count == 1 && /* no instancing */
761 vao->CurrentElementBufferName == 0 && /* only user indices */
762 !ctx->GLThread._PrimitiveRestart && /* no primitive restart */
763 vao->UserPointerMask == vao->BufferEnabled && /* no VBOs */
764 !(vao->NonZeroDivisorMask & vao->BufferEnabled); /* no instanced attribs */
765 }
766
767 static ALWAYS_INLINE void
768 draw_elements(GLuint drawid, GLenum mode, GLsizei count, GLenum type,
769 const GLvoid *indices, GLsizei instance_count, GLint basevertex,
770 GLuint baseinstance, bool index_bounds_valid, GLuint min_index,
771 GLuint max_index, bool compiled_into_dlist, bool no_error)
772 {
773 GET_CURRENT_CONTEXT(ctx);
774
775 /* The main benefit of no_error is that we can discard no-op draws
776 * immediately. These are plentiful in Viewperf2020/Catia1.
777 */
778 if (no_error && (count <= 0 || instance_count <= 0))
779 return;
780
781 if (unlikely(compiled_into_dlist && ctx->GLThread.ListMode)) {
782 _mesa_glthread_finish_before(ctx, "DrawElements");
783
784 /* Only use the ones that are compiled into display lists. */
785 if (basevertex) {
786 CALL_DrawElementsBaseVertex(ctx->Dispatch.Current,
787 (mode, count, type, indices, basevertex));
788 } else if (index_bounds_valid) {
789 CALL_DrawRangeElements(ctx->Dispatch.Current,
790 (mode, min_index, max_index, count, type, indices));
791 } else {
792 CALL_DrawElements(ctx->Dispatch.Current, (mode, count, type, indices));
793 }
794 return;
795 }
796
797 if (unlikely(!no_error && index_bounds_valid && max_index < min_index)) {
798 _mesa_marshal_InternalSetError(GL_INVALID_VALUE);
799 return;
800 }
801
802 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
803 unsigned user_buffer_mask =
804 _mesa_is_desktop_gl_core(ctx) ? 0 : get_user_buffer_mask(ctx);
805 bool has_user_indices = vao->CurrentElementBufferName == 0 && indices;
806
807 /* Fast path when nothing needs to be done.
808 *
809 * This is also an error path. Zero counts should still call the driver
810 * for possible GL errors.
811 */
812 if ((!user_buffer_mask && !has_user_indices) ||
813 (!no_error &&
814 /* zeros are discarded for no_error at the beginning */
815 (count <= 0 || instance_count <= 0 || /* GL_INVALID_VALUE / no-op */
816 !_mesa_is_index_type_valid(type) || /* GL_INVALID_VALUE */
817 ctx->Dispatch.Current == ctx->Dispatch.ContextLost || /* GL_INVALID_OPERATION */
818 ctx->GLThread.inside_begin_end || /* GL_INVALID_OPERATION */
819 ctx->GLThread.ListMode))) { /* GL_INVALID_OPERATION */
820 if (drawid == 0 && baseinstance == 0) {
821 if (instance_count == 1 && basevertex == 0) {
822 if ((count & 0xffff) == count && (uintptr_t)indices <= UINT16_MAX) {
823 /* Packed version of DrawElements: 16-bit count and 16-bit index offset,
824 * reducing the call size by 8 bytes.
825 * This is the most common case in Viewperf2020/Catia1.
826 */
827 int cmd_size = sizeof(struct marshal_cmd_DrawElementsPacked);
828 struct marshal_cmd_DrawElementsPacked *cmd =
829 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsPacked, cmd_size);
830
831 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
832 cmd->type = encode_index_type(type);
833 cmd->count = count;
834 cmd->indices = (uintptr_t)indices;
835 } else {
836 int cmd_size = sizeof(struct marshal_cmd_DrawElements);
837 struct marshal_cmd_DrawElements *cmd =
838 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElements, cmd_size);
839
840 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
841 cmd->type = encode_index_type(type);
842 cmd->count = count;
843 cmd->indices = indices;
844 }
845 } else {
846 int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedBaseVertex);
847 struct marshal_cmd_DrawElementsInstancedBaseVertex *cmd =
848 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedBaseVertex, cmd_size);
849
850 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
851 cmd->type = encode_index_type(type);
852 cmd->count = count;
853 cmd->primcount = instance_count;
854 cmd->basevertex = basevertex;
855 cmd->indices = indices;
856 }
857 } else if (drawid == 0 && basevertex == 0) {
858 int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedBaseInstance);
859 struct marshal_cmd_DrawElementsInstancedBaseInstance *cmd =
860 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedBaseInstance, cmd_size);
861
862 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
863 cmd->type = encode_index_type(type);
864 cmd->count = count;
865 cmd->primcount = instance_count;
866 cmd->baseinstance = baseinstance;
867 cmd->indices = indices;
868 } else {
869 int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstanceDrawID);
870 struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstanceDrawID *cmd =
871 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedBaseVertexBaseInstanceDrawID, cmd_size);
872
873 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
874 cmd->type = encode_index_type(type);
875 cmd->count = count;
876 cmd->instance_count = instance_count;
877 cmd->basevertex = basevertex;
878 cmd->baseinstance = baseinstance;
879 cmd->drawid = drawid;
880 cmd->indices = indices;
881 }
882 return;
883 }
884
885 bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;
886 unsigned index_size = get_index_size(type);
887
888 if (need_index_bounds && !index_bounds_valid) {
889 /* Compute the index bounds. */
890 if (has_user_indices) {
891 min_index = ~0;
892 max_index = 0;
893 vbo_get_minmax_index_mapped(count, index_size,
894 ctx->GLThread._RestartIndex[index_size - 1],
895 ctx->GLThread._PrimitiveRestart, indices,
896 &min_index, &max_index);
897 } else {
898 /* Indices in a buffer. */
899 _mesa_glthread_finish_before(ctx, "DrawElements - need index bounds");
900 vbo_get_minmax_index(ctx, ctx->Array.VAO->IndexBufferObj,
901 NULL, (intptr_t)indices, count, index_size,
902 ctx->GLThread._PrimitiveRestart,
903 ctx->GLThread._RestartIndex[index_size - 1],
904 &min_index, &max_index);
905 }
906 index_bounds_valid = true;
907 }
908
909 unsigned start_vertex = min_index + basevertex;
910 unsigned num_vertices = max_index + 1 - min_index;
911
912 /* If the vertex range to upload is much greater than the vertex count (e.g.
913 * only 3 vertices with indices 0, 1, 999999), uploading the whole range
914 * would take too much time. If all buffers are user buffers, have glthread
915 * fetch all indices and vertices and convert the draw into glBegin/glEnd.
916 * For such pathological cases, it's the fastest way.
917 *
918 * The game Cogs benefits from this - its FPS increases from 0 to 197.
919 */
920 if (should_convert_to_begin_end(ctx, count, num_vertices, instance_count,
921 vao)) {
922 _mesa_glthread_UnrollDrawElements(ctx, mode, count, type, indices,
923 basevertex);
924 return;
925 }
926
927 struct gl_buffer_object *buffers[VERT_ATTRIB_MAX];
928 int offsets[VERT_ATTRIB_MAX];
929
930 if (user_buffer_mask) {
931 if (!upload_vertices(ctx, user_buffer_mask, start_vertex, num_vertices,
932 baseinstance, instance_count, buffers, offsets))
933 return; /* the error is set by upload_vertices */
934 }
935
936 /* Upload indices. */
937 struct gl_buffer_object *index_buffer = NULL;
938 if (has_user_indices) {
939 index_buffer = upload_indices(ctx, count, index_size, &indices);
940 if (!index_buffer)
941 return; /* the error is set by upload_indices */
942 }
943
944 /* Draw asynchronously. */
945 unsigned num_buffers = util_bitcount(user_buffer_mask);
946 int buffers_size = num_buffers * sizeof(buffers[0]);
947 int offsets_size = num_buffers * sizeof(int);
948 char *variable_data;
949
950 if (instance_count == 1 && basevertex == 0 && baseinstance == 0 &&
951 drawid == 0 && (count & 0xffff) == count &&
952 (uintptr_t)indices <= UINT32_MAX) {
953 int cmd_size = sizeof(struct marshal_cmd_DrawElementsUserBufPacked) +
954 buffers_size + offsets_size;
955 struct marshal_cmd_DrawElementsUserBufPacked *cmd;
956
957 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsUserBufPacked, cmd_size);
958 cmd->num_slots = align(cmd_size, 8) / 8;
959 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
960 cmd->type = encode_index_type(type);
961 cmd->count = count; /* truncated */
962 cmd->indices = (uintptr_t)indices; /* truncated */
963 cmd->user_buffer_mask = user_buffer_mask;
964 cmd->index_buffer = index_buffer;
965 variable_data = (char*)(cmd + 1);
966 } else {
967 int cmd_size = sizeof(struct marshal_cmd_DrawElementsUserBuf) +
968 buffers_size + offsets_size;
969 struct marshal_cmd_DrawElementsUserBuf *cmd;
970
971 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsUserBuf, cmd_size);
972 cmd->num_slots = align(cmd_size, 8) / 8;
973 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
974 cmd->type = encode_index_type(type);
975 cmd->count = count;
976 cmd->indices = indices;
977 cmd->instance_count = instance_count;
978 cmd->basevertex = basevertex;
979 cmd->baseinstance = baseinstance;
980 cmd->user_buffer_mask = user_buffer_mask;
981 cmd->index_buffer = index_buffer;
982 cmd->drawid = drawid;
983 variable_data = (char*)(cmd + 1);
984 }
985
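/* Both command variants append the same payload: buffer pointers first,
 * then offsets, as read back by the DrawElementsUserBuf unmarshal code.
 */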
986 if (user_buffer_mask) {
987 memcpy(variable_data, buffers, buffers_size);
988 variable_data += buffers_size;
989 memcpy(variable_data, offsets, offsets_size);
990 }
991 }
992
993 struct marshal_cmd_MultiDrawElementsUserBuf
994 {
995 struct marshal_cmd_base cmd_base;
996 bool has_base_vertex;
997 GLenum8 mode;
998 GLindextype type;
999 uint16_t num_slots;
1000 GLsizei draw_count;
1001 GLuint user_buffer_mask;
1002 struct gl_buffer_object *index_buffer;
1003 };
1004
1005 uint32_t
1006 _mesa_unmarshal_MultiDrawElementsUserBuf(struct gl_context *ctx,
1007 const struct marshal_cmd_MultiDrawElementsUserBuf *restrict cmd)
1008 {
1009 const GLsizei draw_count = cmd->draw_count;
1010 const GLsizei real_draw_count = MAX2(draw_count, 0);
1011 const GLuint user_buffer_mask = cmd->user_buffer_mask;
1012 const bool has_base_vertex = cmd->has_base_vertex;
1013
1014 const char *variable_data = (const char *)(cmd + 1);
1015 const GLsizei *count = (GLsizei *)variable_data;
1016 variable_data += sizeof(GLsizei) * real_draw_count;
1017 const GLsizei *basevertex = NULL;
1018 if (has_base_vertex) {
1019 basevertex = (GLsizei *)variable_data;
1020 variable_data += sizeof(GLsizei) * real_draw_count;
1021 }
1022 const int *offsets = NULL;
1023 if (user_buffer_mask) {
1024 offsets = (const int *)variable_data;
1025 variable_data += sizeof(int) * util_bitcount(user_buffer_mask);
1026 }
1027
1028 /* Align for pointers. */
1029 if ((uintptr_t)variable_data % sizeof(uintptr_t))
1030 variable_data += 4;
1031
1032 const GLvoid *const *indices = (const GLvoid *const *)variable_data;
1033 variable_data += sizeof(const GLvoid *const *) * real_draw_count;
1034
1035 /* Bind uploaded buffers if needed. */
1036 if (user_buffer_mask) {
1037 struct gl_buffer_object **buffers = (struct gl_buffer_object **)variable_data;
1038
1039 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets, user_buffer_mask);
1040 }
1041
1042 /* Draw. */
1043 const GLenum mode = cmd->mode;
1044 const GLenum type = _mesa_decode_index_type(cmd->type);
1045 struct gl_buffer_object *index_buffer = cmd->index_buffer;
1046
1047 CALL_MultiDrawElementsUserBuf(ctx->Dispatch.Current,
1048 ((GLintptr)index_buffer, mode, count, type,
1049 indices, draw_count, basevertex));
1050 _mesa_reference_buffer_object(ctx, &index_buffer, NULL);
1051 return cmd->num_slots;
1052 }
1053
1054 static void
1055 multi_draw_elements_async(struct gl_context *ctx, GLenum mode,
1056 const GLsizei *count, GLenum type,
1057 const GLvoid *const *indices, GLsizei draw_count,
1058 const GLsizei *basevertex,
1059 struct gl_buffer_object *index_buffer,
1060 unsigned user_buffer_mask,
1061 struct gl_buffer_object **buffers,
1062 const int *offsets)
1063 {
1064 int real_draw_count = MAX2(draw_count, 0);
1065 int count_size = sizeof(GLsizei) * real_draw_count;
1066 int indices_size = sizeof(indices[0]) * real_draw_count;
1067 int basevertex_size = basevertex ? sizeof(GLsizei) * real_draw_count : 0;
1068 unsigned num_buffers = util_bitcount(user_buffer_mask);
1069 int buffers_size = num_buffers * sizeof(buffers[0]);
1070 int offsets_size = num_buffers * sizeof(int);
1071 int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsUserBuf) +
1072 count_size + indices_size + basevertex_size + buffers_size +
1073 offsets_size;
1074 struct marshal_cmd_MultiDrawElementsUserBuf *cmd;
1075
1076 /* Make sure cmd can fit in the queue buffer */
1077 if (cmd_size <= MARSHAL_MAX_CMD_SIZE) {
1078 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawElementsUserBuf, cmd_size);
1079 cmd->num_slots = align(cmd_size, 8) / 8;
1080 cmd->mode = MIN2(mode, 0xff); /* primitive types go from 0 to 14 */
1081 cmd->type = encode_index_type(type);
1082 cmd->draw_count = draw_count;
1083 cmd->user_buffer_mask = user_buffer_mask;
1084 cmd->index_buffer = index_buffer;
1085 cmd->has_base_vertex = basevertex != NULL;
1086
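/* Payload layout (must match _mesa_unmarshal_MultiDrawElementsUserBuf):
 * count[], optional basevertex[], optional offsets[], pointer-size
 * alignment padding, indices[], optional buffers[].
 */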
1087 char *variable_data = (char*)(cmd + 1);
1088 memcpy(variable_data, count, count_size);
1089 variable_data += count_size;
1090 if (basevertex) {
1091 memcpy(variable_data, basevertex, basevertex_size);
1092 variable_data += basevertex_size;
1093 }
1094 if (user_buffer_mask) {
1095 memcpy(variable_data, offsets, offsets_size);
1096 variable_data += offsets_size;
1097 }
1098
1099 /* Align for pointers. */
1100 if ((uintptr_t)variable_data % sizeof(uintptr_t))
1101 variable_data += 4;
1102
1103 memcpy(variable_data, indices, indices_size);
1104 variable_data += indices_size;
1105
1106 if (user_buffer_mask)
1107 memcpy(variable_data, buffers, buffers_size);
1108 } else {
1109 /* The call is too large, so sync and execute the unmarshal code here. */
1110 _mesa_glthread_finish_before(ctx, "DrawElements");
1111
1112 /* Bind uploaded buffers if needed. */
1113 if (user_buffer_mask) {
1114 _mesa_InternalBindVertexBuffers(ctx, buffers, offsets,
1115 user_buffer_mask);
1116 }
1117
1118 /* Draw. */
1119 CALL_MultiDrawElementsUserBuf(ctx->Dispatch.Current,
1120 ((GLintptr)index_buffer, mode, count,
1121 type, indices, draw_count, basevertex));
1122 _mesa_reference_buffer_object(ctx, &index_buffer, NULL);
1123 }
1124 }
1125
1126 void GLAPIENTRY
1127 _mesa_marshal_MultiDrawElementsBaseVertex(GLenum mode, const GLsizei *count,
1128 GLenum type,
1129 const GLvoid *const *indices,
1130 GLsizei draw_count,
1131 const GLsizei *basevertex)
1132 {
1133 GET_CURRENT_CONTEXT(ctx);
1134
1135 if (unlikely(ctx->GLThread.ListMode)) {
1136 _mesa_glthread_finish_before(ctx, "MultiDrawElements");
1137
1138 if (basevertex) {
1139 CALL_MultiDrawElementsBaseVertex(ctx->Dispatch.Current,
1140 (mode, count, type, indices, draw_count,
1141 basevertex));
1142 } else {
1143 CALL_MultiDrawElements(ctx->Dispatch.Current,
1144 (mode, count, type, indices, draw_count));
1145 }
1146 return;
1147 }
1148
1149 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1150 unsigned user_buffer_mask = 0;
1151 bool has_user_indices = false;
1152
1153 /* Non-VBO vertex arrays are used only if this is true.
1154 * When nothing needs to be uploaded, or the draw is a no-op, or it would
1155 * generate a GL error, we don't upload anything.
1156 */
1157 if (draw_count > 0 && _mesa_is_index_type_valid(type) &&
1158 ctx->Dispatch.Current != ctx->Dispatch.ContextLost &&
1159 !ctx->GLThread.inside_begin_end) {
1160 user_buffer_mask = _mesa_is_desktop_gl_core(ctx) ? 0 : get_user_buffer_mask(ctx);
1161 has_user_indices = vao->CurrentElementBufferName == 0;
1162 }
1163
1164 /* Fast path when we don't need to upload anything. */
1165 if (!user_buffer_mask && !has_user_indices) {
1166 multi_draw_elements_async(ctx, mode, count, type, indices,
1167 draw_count, basevertex, NULL, 0, NULL, NULL);
1168 return;
1169 }
1170
1171 bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;
1172 unsigned index_size = get_index_size(type);
1173 unsigned min_index = ~0;
1174 unsigned max_index = 0;
1175 unsigned total_count = 0;
1176 unsigned num_vertices = 0;
1177
1178 /* This is always true if there is per-vertex data that needs to be
1179 * uploaded.
1180 */
1181 if (need_index_bounds) {
1182 bool synced = false;
1183
1184 /* Compute the index bounds. */
1185 for (unsigned i = 0; i < draw_count; i++) {
1186 GLsizei vertex_count = count[i];
1187
1188 if (vertex_count < 0) {
1189 /* Just call the driver to set the error. */
1190 multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
1191 basevertex, NULL, 0, NULL, NULL);
1192 return;
1193 }
1194 if (vertex_count == 0)
1195 continue;
1196
1197 unsigned min = ~0, max = 0;
1198 if (has_user_indices) {
1199 vbo_get_minmax_index_mapped(vertex_count, index_size,
1200 ctx->GLThread._RestartIndex[index_size - 1],
1201 ctx->GLThread._PrimitiveRestart, indices[i],
1202 &min, &max);
1203 } else {
1204 if (!synced) {
1205 _mesa_glthread_finish_before(ctx, "MultiDrawElements - need index bounds");
1206 synced = true;
1207 }
1208 vbo_get_minmax_index(ctx, ctx->Array.VAO->IndexBufferObj,
1209 NULL, (intptr_t)indices[i], vertex_count,
1210 index_size, ctx->GLThread._PrimitiveRestart,
1211 ctx->GLThread._RestartIndex[index_size - 1],
1212 &min, &max);
1213 }
1214
1215 if (basevertex) {
1216 min += basevertex[i];
1217 max += basevertex[i];
1218 }
1219 min_index = MIN2(min_index, min);
1220 max_index = MAX2(max_index, max);
1221 total_count += vertex_count;
1222 }
1223
1224 num_vertices = max_index + 1 - min_index;
1225
1226 if (total_count == 0 || num_vertices == 0) {
1227 /* Nothing to do, but call the driver to set possible GL errors. */
1228 multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
1229 basevertex, NULL, 0, NULL, NULL);
1230 return;
1231 }
1232 } else if (has_user_indices) {
1233 /* Only compute total_count for the upload of indices. */
1234 for (unsigned i = 0; i < draw_count; i++) {
1235 GLsizei vertex_count = count[i];
1236
1237 if (vertex_count < 0) {
1238 /* Just call the driver to set the error. */
1239 multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
1240 basevertex, NULL, 0, NULL, NULL);
1241 return;
1242 }
1243 if (vertex_count == 0)
1244 continue;
1245
1246 total_count += vertex_count;
1247 }
1248
1249 if (total_count == 0) {
1250 /* Nothing to do, but call the driver to set possible GL errors. */
1251 multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
1252 basevertex, NULL, 0, NULL, NULL);
1253 return;
1254 }
1255 }
1256
1257 /* Upload vertices. */
1258 struct gl_buffer_object *buffers[VERT_ATTRIB_MAX];
1259 int offsets[VERT_ATTRIB_MAX];
1260
1261 if (user_buffer_mask) {
1262 if (!upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
1263 0, 1, buffers, offsets))
1264 return; /* the error is set by upload_vertices */
1265 }
1266
1267 /* Upload indices. */
1268 struct gl_buffer_object *index_buffer = NULL;
1269 if (has_user_indices) {
1270 const GLvoid **out_indices = alloca(sizeof(indices[0]) * draw_count);
1271
1272 index_buffer = upload_multi_indices(ctx, total_count, index_size,
1273 draw_count, count, indices,
1274 out_indices);
1275 if (!index_buffer)
1276 return; /* the error is set by upload_multi_indices */
1277
1278 indices = out_indices;
1279 }
1280
1281 /* Draw asynchronously. */
1282 multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
1283 basevertex, index_buffer, user_buffer_mask,
1284 buffers, offsets);
1285 }
1286
1287 void GLAPIENTRY
1288 _mesa_marshal_MultiModeDrawArraysIBM(const GLenum *mode, const GLint *first,
1289 const GLsizei *count, GLsizei primcount,
1290 GLint modestride)
1291 {
1292 for (int i = 0 ; i < primcount; i++) {
1293 if (count[i] > 0) {
1294 GLenum m = *((GLenum *)((GLubyte *)mode + i * modestride));
1295 _mesa_marshal_DrawArrays(m, first[i], count[i]);
1296 }
1297 }
1298 }
1299
1300 void GLAPIENTRY
1301 _mesa_marshal_MultiModeDrawElementsIBM(const GLenum *mode,
1302 const GLsizei *count, GLenum type,
1303 const GLvoid * const *indices,
1304 GLsizei primcount, GLint modestride)
1305 {
1306 for (int i = 0 ; i < primcount; i++) {
1307 if (count[i] > 0) {
1308 GLenum m = *((GLenum *)((GLubyte *)mode + i * modestride));
1309 _mesa_marshal_DrawElements(m, count[i], type, indices[i]);
1310 }
1311 }
1312 }
1313
1314 static const void *
1315 map_draw_indirect_params(struct gl_context *ctx, GLintptr offset,
1316 unsigned count, unsigned stride)
1317 {
1318 struct gl_buffer_object *obj = ctx->DrawIndirectBuffer;
1319
1320 if (!obj)
1321 return (void*)offset;
1322
1323 return _mesa_bufferobj_map_range(ctx, offset,
1324 MIN2((size_t)count * stride, obj->Size),
1325 GL_MAP_READ_BIT, obj, MAP_INTERNAL);
1326 }
1327
1328 static void
1329 unmap_draw_indirect_params(struct gl_context *ctx)
1330 {
1331 if (ctx->DrawIndirectBuffer)
1332 _mesa_bufferobj_unmap(ctx, ctx->DrawIndirectBuffer, MAP_INTERNAL);
1333 }
1334
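/* Synchronously read a draw count stored in the bound parameter buffer
 * (ctx->ParameterBuffer); returns 0 if no parameter buffer is bound.
 */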
1335 static unsigned
1336 read_draw_indirect_count(struct gl_context *ctx, GLintptr offset)
1337 {
1338 unsigned result = 0;
1339
1340 if (ctx->ParameterBuffer) {
1341 _mesa_bufferobj_get_subdata(ctx, offset, sizeof(result), &result,
1342 ctx->ParameterBuffer);
1343 }
1344 return result;
1345 }
1346
1347 static void
1348 lower_draw_arrays_indirect(struct gl_context *ctx, GLenum mode,
1349 GLintptr indirect, GLsizei stride,
1350 unsigned draw_count)
1351 {
1352 /* If <stride> is zero, the elements are tightly packed. */
1353 if (stride == 0)
1354 stride = 4 * sizeof(GLuint); /* sizeof(DrawArraysIndirectCommand) */
1355
1356 const uint32_t *params =
1357 map_draw_indirect_params(ctx, indirect, draw_count, stride);
1358
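/* Each record is a DrawArraysIndirectCommand:
 * { GLuint count; GLuint instanceCount; GLuint first; GLuint baseInstance; }
 * hence the parameter order below (first = +2, count = +0, etc.).
 */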
1359 for (unsigned i = 0; i < draw_count; i++) {
1360 draw_arrays(i, mode,
1361 params[i * stride / 4 + 2],
1362 params[i * stride / 4 + 0],
1363 params[i * stride / 4 + 1],
1364 params[i * stride / 4 + 3], false, false);
1365 }
1366
1367 unmap_draw_indirect_params(ctx);
1368 }
1369
1370 static void
1371 lower_draw_elements_indirect(struct gl_context *ctx, GLenum mode, GLenum type,
1372 GLintptr indirect, GLsizei stride,
1373 unsigned draw_count)
1374 {
1375 /* If <stride> is zero, the elements are tightly packed. */
1376 if (stride == 0)
1377 stride = 5 * sizeof(GLuint); /* sizeof(DrawElementsIndirectCommand) */
1378
1379 const uint32_t *params =
1380 map_draw_indirect_params(ctx, indirect, draw_count, stride);
1381
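/* Each record is a DrawElementsIndirectCommand:
 * { GLuint count; GLuint instanceCount; GLuint firstIndex;
 *   GLint baseVertex; GLuint baseInstance; }
 * firstIndex (+2) is converted to a byte offset with get_index_size().
 */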
1382 for (unsigned i = 0; i < draw_count; i++) {
1383 draw_elements(i, mode,
1384 params[i * stride / 4 + 0],
1385 type,
1386 (GLvoid*)((uintptr_t)params[i * stride / 4 + 2] *
1387 get_index_size(type)),
1388 params[i * stride / 4 + 1],
1389 params[i * stride / 4 + 3],
1390 params[i * stride / 4 + 4],
1391 false, 0, 0, false, false);
1392 }
1393 unmap_draw_indirect_params(ctx);
1394 }
1395
1396 static inline bool
1397 draw_indirect_async_allowed(struct gl_context *ctx, unsigned user_buffer_mask)
1398 {
1399 return ctx->API != API_OPENGL_COMPAT ||
1400 /* This will just generate GL_INVALID_OPERATION, as it should. */
1401 ctx->GLThread.inside_begin_end ||
1402 ctx->GLThread.ListMode ||
1403 ctx->Dispatch.Current == ctx->Dispatch.ContextLost ||
1404 /* If the DrawIndirect buffer is bound, it behaves like profile != compat
1405 * if there are no user VBOs. */
1406 (ctx->GLThread.CurrentDrawIndirectBufferName && !user_buffer_mask);
1407 }
1408
1409 uint32_t
1410 _mesa_unmarshal_DrawArraysIndirect(struct gl_context *ctx,
1411 const struct marshal_cmd_DrawArraysIndirect *cmd)
1412 {
1413 GLenum mode = cmd->mode;
1414 const GLvoid * indirect = cmd->indirect;
1415
1416 CALL_DrawArraysIndirect(ctx->Dispatch.Current, (mode, indirect));
1417
1418 return align(sizeof(struct marshal_cmd_DrawArraysIndirect), 8) / 8;
1419 }
1420
1421 void GLAPIENTRY
1422 _mesa_marshal_DrawArraysIndirect(GLenum mode, const GLvoid *indirect)
1423 {
1424 GET_CURRENT_CONTEXT(ctx);
1425 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1426 unsigned user_buffer_mask =
1427 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1428
1429 if (draw_indirect_async_allowed(ctx, user_buffer_mask)) {
1430 int cmd_size = sizeof(struct marshal_cmd_DrawArraysIndirect);
1431 struct marshal_cmd_DrawArraysIndirect *cmd;
1432
1433 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysIndirect, cmd_size);
1434 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1435 cmd->indirect = indirect;
1436 return;
1437 }
1438
1439 _mesa_glthread_finish_before(ctx, "DrawArraysIndirect");
1440 lower_draw_arrays_indirect(ctx, mode, (GLintptr)indirect, 0, 1);
1441 }
1442
1443 uint32_t
1444 _mesa_unmarshal_DrawElementsIndirect(struct gl_context *ctx,
1445 const struct marshal_cmd_DrawElementsIndirect *cmd)
1446 {
1447 GLenum mode = cmd->mode;
1448 const GLenum type = _mesa_decode_index_type(cmd->type);
1449 const GLvoid * indirect = cmd->indirect;
1450
1451 CALL_DrawElementsIndirect(ctx->Dispatch.Current, (mode, type, indirect));
1452 return align(sizeof(struct marshal_cmd_DrawElementsIndirect), 8) / 8;
1453 }
1454
1455 void GLAPIENTRY
1456 _mesa_marshal_DrawElementsIndirect(GLenum mode, GLenum type, const GLvoid *indirect)
1457 {
1458 GET_CURRENT_CONTEXT(ctx);
1459 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1460 unsigned user_buffer_mask =
1461 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1462
1463 if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
1464 !_mesa_is_index_type_valid(type)) {
1465 int cmd_size = sizeof(struct marshal_cmd_DrawElementsIndirect);
1466 struct marshal_cmd_DrawElementsIndirect *cmd;
1467
1468 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsIndirect, cmd_size);
1469 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1470 cmd->type = encode_index_type(type);
1471 cmd->indirect = indirect;
1472 return;
1473 }
1474
1475 _mesa_glthread_finish_before(ctx, "DrawElementsIndirect");
1476 lower_draw_elements_indirect(ctx, mode, type, (GLintptr)indirect, 0, 1);
1477 }
1478
1479 uint32_t
1480 _mesa_unmarshal_MultiDrawArraysIndirect(struct gl_context *ctx,
1481 const struct marshal_cmd_MultiDrawArraysIndirect *cmd)
1482 {
1483 GLenum mode = cmd->mode;
1484 const GLvoid * indirect = cmd->indirect;
1485 GLsizei primcount = cmd->primcount;
1486 GLsizei stride = cmd->stride;
1487
1488 CALL_MultiDrawArraysIndirect(ctx->Dispatch.Current,
1489 (mode, indirect, primcount, stride));
1490 return align(sizeof(struct marshal_cmd_MultiDrawArraysIndirect), 8) / 8;
1491 }
1492
1493 void GLAPIENTRY
1494 _mesa_marshal_MultiDrawArraysIndirect(GLenum mode, const GLvoid *indirect,
1495 GLsizei primcount, GLsizei stride)
1496 {
1497 GET_CURRENT_CONTEXT(ctx);
1498 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1499 unsigned user_buffer_mask =
1500 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1501
1502 if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
1503 primcount <= 0) {
1504 int cmd_size = sizeof(struct marshal_cmd_MultiDrawArraysIndirect);
1505 struct marshal_cmd_MultiDrawArraysIndirect *cmd;
1506
1507 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawArraysIndirect,
1508 cmd_size);
1509 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1510 cmd->indirect = indirect;
1511 cmd->primcount = primcount;
1512 cmd->stride = stride;
1513 return;
1514 }
1515
1516 /* Lower the draw to direct due to non-VBO vertex arrays. */
1517 _mesa_glthread_finish_before(ctx, "MultiDrawArraysIndirect");
1518 lower_draw_arrays_indirect(ctx, mode, (GLintptr)indirect, stride, primcount);
1519 }
1520
1521 uint32_t
1522 _mesa_unmarshal_MultiDrawElementsIndirect(struct gl_context *ctx,
1523 const struct marshal_cmd_MultiDrawElementsIndirect *cmd)
1524 {
1525 GLenum mode = cmd->mode;
1526 const GLenum type = _mesa_decode_index_type(cmd->type);
1527 const GLvoid * indirect = cmd->indirect;
1528 GLsizei primcount = cmd->primcount;
1529 GLsizei stride = cmd->stride;
1530
1531 CALL_MultiDrawElementsIndirect(ctx->Dispatch.Current,
1532 (mode, type, indirect, primcount, stride));
1533 return align(sizeof(struct marshal_cmd_MultiDrawElementsIndirect), 8) / 8;
1534 }
1535
1536 void GLAPIENTRY
1537 _mesa_marshal_MultiDrawElementsIndirect(GLenum mode, GLenum type,
1538 const GLvoid *indirect,
1539 GLsizei primcount, GLsizei stride)
1540 {
1541 GET_CURRENT_CONTEXT(ctx);
1542 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1543 unsigned user_buffer_mask =
1544 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1545
1546 if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
1547 primcount <= 0 ||
1548 !_mesa_is_index_type_valid(type)) {
1549 int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsIndirect);
1550 struct marshal_cmd_MultiDrawElementsIndirect *cmd;
1551
1552 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawElementsIndirect,
1553 cmd_size);
1554 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1555 cmd->type = encode_index_type(type);
1556 cmd->indirect = indirect;
1557 cmd->primcount = primcount;
1558 cmd->stride = stride;
1559 return;
1560 }
1561
1562 /* Lower the draw to direct due to non-VBO vertex arrays. */
1563 _mesa_glthread_finish_before(ctx, "MultiDrawElementsIndirect");
1564 lower_draw_elements_indirect(ctx, mode, type, (GLintptr)indirect, stride,
1565 primcount);
1566 }
1567
1568 uint32_t
1569 _mesa_unmarshal_MultiDrawArraysIndirectCountARB(struct gl_context *ctx,
1570 const struct marshal_cmd_MultiDrawArraysIndirectCountARB *cmd)
1571 {
1572 GLenum mode = cmd->mode;
1573 GLintptr indirect = cmd->indirect;
1574 GLintptr drawcount = cmd->drawcount;
1575 GLsizei maxdrawcount = cmd->maxdrawcount;
1576 GLsizei stride = cmd->stride;
1577
1578 CALL_MultiDrawArraysIndirectCountARB(ctx->Dispatch.Current,
1579 (mode, indirect, drawcount,
1580 maxdrawcount, stride));
1581 return align(sizeof(struct marshal_cmd_MultiDrawArraysIndirectCountARB), 8) / 8;
1582 }
1583
1584 void GLAPIENTRY
1585 _mesa_marshal_MultiDrawArraysIndirectCountARB(GLenum mode, GLintptr indirect,
1586 GLintptr drawcount,
1587 GLsizei maxdrawcount,
1588 GLsizei stride)
1589 {
1590 GET_CURRENT_CONTEXT(ctx);
1591 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1592 unsigned user_buffer_mask =
1593 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1594
1595 if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
1596 /* This will just generate GL_INVALID_OPERATION because Draw*IndirectCount
1597 * functions forbid a user indirect buffer in the Compat profile. */
1598 !ctx->GLThread.CurrentDrawIndirectBufferName) {
1599 int cmd_size =
1600 sizeof(struct marshal_cmd_MultiDrawArraysIndirectCountARB);
1601 struct marshal_cmd_MultiDrawArraysIndirectCountARB *cmd =
1602 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawArraysIndirectCountARB,
1603 cmd_size);
1604
1605 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1606 cmd->indirect = indirect;
1607 cmd->drawcount = drawcount;
1608 cmd->maxdrawcount = maxdrawcount;
1609 cmd->stride = stride;
1610 return;
1611 }
1612
1613 /* Lower the draw to direct due to non-VBO vertex arrays. */
1614 _mesa_glthread_finish_before(ctx, "MultiDrawArraysIndirectCountARB");
1615 lower_draw_arrays_indirect(ctx, mode, indirect, stride,
1616 read_draw_indirect_count(ctx, drawcount));
1617 }
1618
1619 uint32_t
1620 _mesa_unmarshal_MultiDrawElementsIndirectCountARB(struct gl_context *ctx,
1621 const struct marshal_cmd_MultiDrawElementsIndirectCountARB *cmd)
1622 {
1623 GLenum mode = cmd->mode;
1624 const GLenum type = _mesa_decode_index_type(cmd->type);
1625 GLintptr indirect = cmd->indirect;
1626 GLintptr drawcount = cmd->drawcount;
1627 GLsizei maxdrawcount = cmd->maxdrawcount;
1628 GLsizei stride = cmd->stride;
1629
1630 CALL_MultiDrawElementsIndirectCountARB(ctx->Dispatch.Current, (mode, type, indirect, drawcount, maxdrawcount, stride));
1631
1632 return align(sizeof(struct marshal_cmd_MultiDrawElementsIndirectCountARB), 8) / 8;
1633 }
1634
1635 void GLAPIENTRY
1636 _mesa_marshal_MultiDrawElementsIndirectCountARB(GLenum mode, GLenum type,
1637 GLintptr indirect,
1638 GLintptr drawcount,
1639 GLsizei maxdrawcount,
1640 GLsizei stride)
1641 {
1642 GET_CURRENT_CONTEXT(ctx);
1643 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
1644 unsigned user_buffer_mask =
1645 _mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
1646
1647 if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
1648 /* This will just generate GL_INVALID_OPERATION because Draw*IndirectCount
1649 * functions forbid a user indirect buffer in the Compat profile. */
1650 !ctx->GLThread.CurrentDrawIndirectBufferName ||
1651 !_mesa_is_index_type_valid(type)) {
1652 int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsIndirectCountARB);
1653 struct marshal_cmd_MultiDrawElementsIndirectCountARB *cmd =
1654 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawElementsIndirectCountARB, cmd_size);
1655
1656 cmd->mode = MIN2(mode, 0xff); /* clamped to 0xff (invalid enum) */
1657 cmd->type = encode_index_type(type);
1658 cmd->indirect = indirect;
1659 cmd->drawcount = drawcount;
1660 cmd->maxdrawcount = maxdrawcount;
1661 cmd->stride = stride;
1662 return;
1663 }
1664
1665 /* Lower the draw to direct due to non-VBO vertex arrays. */
1666 _mesa_glthread_finish_before(ctx, "MultiDrawElementsIndirectCountARB");
1667 lower_draw_elements_indirect(ctx, mode, type, indirect, stride,
1668 read_draw_indirect_count(ctx, drawcount));
1669 }
1670
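/* The entry points below are thin wrappers around the shared draw_arrays()
 * and draw_elements() helpers; the trailing constant arguments (instance
 * count, base vertex/instance, index bounds, error checking) select the
 * behavior of each variant.
 */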
1671 void GLAPIENTRY
1672 _mesa_marshal_DrawArrays(GLenum mode, GLint first, GLsizei count)
1673 {
1674 draw_arrays(0, mode, first, count, 1, 0, true, false);
1675 }
1676
1677 void GLAPIENTRY
1678 _mesa_marshal_DrawArrays_no_error(GLenum mode, GLint first, GLsizei count)
1679 {
1680 draw_arrays(0, mode, first, count, 1, 0, true, true);
1681 }
1682
1683 void GLAPIENTRY
1684 _mesa_marshal_DrawArraysInstanced(GLenum mode, GLint first, GLsizei count,
1685 GLsizei instance_count)
1686 {
1687 draw_arrays(0, mode, first, count, instance_count, 0, false, false);
1688 }
1689
1690 void GLAPIENTRY
1691 _mesa_marshal_DrawArraysInstanced_no_error(GLenum mode, GLint first, GLsizei count,
1692 GLsizei instance_count)
1693 {
1694 draw_arrays(0, mode, first, count, instance_count, 0, false, true);
1695 }
1696
1697 void GLAPIENTRY
1698 _mesa_marshal_DrawArraysInstancedBaseInstance(GLenum mode, GLint first,
1699 GLsizei count, GLsizei instance_count,
1700 GLuint baseinstance)
1701 {
1702 draw_arrays(0, mode, first, count, instance_count, baseinstance, false, false);
1703 }
1704
1705 void GLAPIENTRY
1706 _mesa_marshal_DrawArraysInstancedBaseInstance_no_error(GLenum mode, GLint first,
1707 GLsizei count, GLsizei instance_count,
1708 GLuint baseinstance)
1709 {
1710 draw_arrays(0, mode, first, count, instance_count, baseinstance, false, true);
1711 }
1712
1713 void GLAPIENTRY
1714 _mesa_marshal_DrawElements(GLenum mode, GLsizei count, GLenum type,
1715 const GLvoid *indices)
1716 {
1717 draw_elements(0, mode, count, type, indices, 1, 0,
1718 0, false, 0, 0, true, false);
1719 }
1720
1721 void GLAPIENTRY
1722 _mesa_marshal_DrawElements_no_error(GLenum mode, GLsizei count, GLenum type,
1723 const GLvoid *indices)
1724 {
1725 draw_elements(0, mode, count, type, indices, 1, 0,
1726 0, false, 0, 0, true, true);
1727 }
1728
1729 void GLAPIENTRY
1730 _mesa_marshal_DrawRangeElements(GLenum mode, GLuint start, GLuint end,
1731 GLsizei count, GLenum type,
1732 const GLvoid *indices)
1733 {
1734 draw_elements(0, mode, count, type, indices, 1, 0,
1735 0, true, start, end, true, false);
1736 }
1737
1738 void GLAPIENTRY
1739 _mesa_marshal_DrawRangeElements_no_error(GLenum mode, GLuint start, GLuint end,
1740 GLsizei count, GLenum type,
1741 const GLvoid *indices)
1742 {
1743 draw_elements(0, mode, count, type, indices, 1, 0,
1744 0, true, start, end, true, true);
1745 }
1746
1747 void GLAPIENTRY
1748 _mesa_marshal_DrawElementsInstanced(GLenum mode, GLsizei count, GLenum type,
1749 const GLvoid *indices, GLsizei instance_count)
1750 {
1751 draw_elements(0, mode, count, type, indices, instance_count, 0,
1752 0, false, 0, 0, false, false);
1753 }
1754
1755 void GLAPIENTRY
1756 _mesa_marshal_DrawElementsInstanced_no_error(GLenum mode, GLsizei count,
1757 GLenum type, const GLvoid *indices,
1758 GLsizei instance_count)
1759 {
1760 draw_elements(0, mode, count, type, indices, instance_count, 0,
1761 0, false, 0, 0, false, true);
1762 }
1763
1764 void GLAPIENTRY
1765 _mesa_marshal_DrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type,
1766 const GLvoid *indices, GLint basevertex)
1767 {
1768 draw_elements(0, mode, count, type, indices, 1, basevertex,
1769 0, false, 0, 0, true, false);
1770 }
1771
1772 void GLAPIENTRY
1773 _mesa_marshal_DrawElementsBaseVertex_no_error(GLenum mode, GLsizei count,
1774 GLenum type, const GLvoid *indices,
1775 GLint basevertex)
1776 {
1777 draw_elements(0, mode, count, type, indices, 1, basevertex,
1778 0, false, 0, 0, true, true);
1779 }
1780
1781 void GLAPIENTRY
1782 _mesa_marshal_DrawRangeElementsBaseVertex(GLenum mode, GLuint start, GLuint end,
1783 GLsizei count, GLenum type,
1784 const GLvoid *indices, GLint basevertex)
1785 {
1786 draw_elements(0, mode, count, type, indices, 1, basevertex,
1787 0, true, start, end, true, false);
1788 }
1789
1790 void GLAPIENTRY
1791 _mesa_marshal_DrawRangeElementsBaseVertex_no_error(GLenum mode, GLuint start,
1792 GLuint end, GLsizei count, GLenum type,
1793 const GLvoid *indices, GLint basevertex)
1794 {
1795 draw_elements(0, mode, count, type, indices, 1, basevertex,
1796 0, true, start, end, true, true);
1797 }
1798
1799 void GLAPIENTRY
1800 _mesa_marshal_DrawElementsInstancedBaseVertex(GLenum mode, GLsizei count,
1801 GLenum type, const GLvoid *indices,
1802 GLsizei instance_count, GLint basevertex)
1803 {
1804 draw_elements(0, mode, count, type, indices, instance_count, basevertex,
1805 0, false, 0, 0, false, false);
1806 }
1807
1808 void GLAPIENTRY
1809 _mesa_marshal_DrawElementsInstancedBaseVertex_no_error(GLenum mode, GLsizei count,
1810 GLenum type, const GLvoid *indices,
1811 GLsizei instance_count, GLint basevertex)
1812 {
1813 draw_elements(0, mode, count, type, indices, instance_count, basevertex,
1814 0, false, 0, 0, false, true);
1815 }
1816
1817 void GLAPIENTRY
1818 _mesa_marshal_DrawElementsInstancedBaseInstance(GLenum mode, GLsizei count,
1819 GLenum type, const GLvoid *indices,
1820 GLsizei instance_count, GLuint baseinstance)
1821 {
1822 draw_elements(0, mode, count, type, indices, instance_count, 0,
1823 baseinstance, false, 0, 0, false, false);
1824 }
1825
1826 void GLAPIENTRY
1827 _mesa_marshal_DrawElementsInstancedBaseInstance_no_error(GLenum mode, GLsizei count,
1828 GLenum type, const GLvoid *indices,
1829 GLsizei instance_count, GLuint baseinstance)
1830 {
1831 draw_elements(0, mode, count, type, indices, instance_count, 0,
1832 baseinstance, false, 0, 0, false, true);
1833 }
1834
1835 void GLAPIENTRY
1836 _mesa_marshal_DrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count,
1837 GLenum type, const GLvoid *indices,
1838 GLsizei instance_count, GLint basevertex,
1839 GLuint baseinstance)
1840 {
1841 draw_elements(0, mode, count, type, indices, instance_count, basevertex,
1842 baseinstance, false, 0, 0, false, false);
1843 }
1844
1845 void GLAPIENTRY
1846 _mesa_marshal_DrawElementsInstancedBaseVertexBaseInstance_no_error(GLenum mode, GLsizei count,
1847 GLenum type, const GLvoid *indices,
1848 GLsizei instance_count,
1849 GLint basevertex, GLuint baseinstance)
1850 {
1851 draw_elements(0, mode, count, type, indices, instance_count, basevertex,
1852 baseinstance, false, 0, 0, false, true);
1853 }
1854
1855 void GLAPIENTRY
1856 _mesa_marshal_MultiDrawElements(GLenum mode, const GLsizei *count,
1857 GLenum type, const GLvoid *const *indices,
1858 GLsizei draw_count)
1859 {
1860 _mesa_marshal_MultiDrawElementsBaseVertex(mode, count, type, indices,
1861 draw_count, NULL);
1862 }
1863
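/* The unmarshal stubs below are never expected to run: glthread converts
 * these draws to internal commands (e.g. the *UserBuf and *Packed variants)
 * at marshal time, so reaching any of them would indicate a bug.
 */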
1864 uint32_t
1865 _mesa_unmarshal_DrawArrays(struct gl_context *ctx,
1866 const struct marshal_cmd_DrawArrays *restrict cmd)
1867 {
1868 unreachable("should never end up here");
1869 return 0;
1870 }
1871
1872 uint32_t
1873 _mesa_unmarshal_DrawArraysInstancedBaseInstance(struct gl_context *ctx,
1874 const struct marshal_cmd_DrawArraysInstancedBaseInstance *restrict cmd)
1875 {
1876 unreachable("should never end up here");
1877 return 0;
1878 }
1879
1880 uint32_t
1881 _mesa_unmarshal_MultiDrawArrays(struct gl_context *ctx,
1882 const struct marshal_cmd_MultiDrawArrays *restrict cmd)
1883 {
1884 unreachable("should never end up here");
1885 return 0;
1886 }
1887
1888 uint32_t
1889 _mesa_unmarshal_DrawRangeElements(struct gl_context *ctx,
1890 const struct marshal_cmd_DrawRangeElements *restrict cmd)
1891 {
1892 unreachable("should never end up here");
1893 return 0;
1894 }
1895
1896 uint32_t
1897 _mesa_unmarshal_DrawRangeElementsBaseVertex(struct gl_context *ctx,
1898 const struct marshal_cmd_DrawRangeElementsBaseVertex *cmd)
1899 {
1900 unreachable("should never end up here");
1901 return 0;
1902 }
1903
1904 uint32_t
1905 _mesa_unmarshal_DrawElementsInstanced(struct gl_context *ctx,
1906 const struct marshal_cmd_DrawElementsInstanced *restrict cmd)
1907 {
1908 unreachable("should never end up here");
1909 return 0;
1910 }
1911
1912 uint32_t
1913 _mesa_unmarshal_DrawElementsBaseVertex(struct gl_context *ctx,
1914 const struct marshal_cmd_DrawElementsBaseVertex *restrict cmd)
1915 {
1916 unreachable("should never end up here");
1917 return 0;
1918 }
1919
1920 uint32_t
1921 _mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstance(struct gl_context *ctx,
1922 const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance *restrict cmd)
1923 {
1924 unreachable("should never end up here");
1925 return 0;
1926 }
1927
1928 uint32_t
1929 _mesa_unmarshal_MultiDrawElements(struct gl_context *ctx,
1930 const struct marshal_cmd_MultiDrawElements *restrict cmd)
1931 {
1932 unreachable("should never end up here");
1933 return 0;
1934 }
1935
1936 uint32_t
1937 _mesa_unmarshal_MultiDrawElementsBaseVertex(struct gl_context *ctx,
1938 const struct marshal_cmd_MultiDrawElementsBaseVertex *restrict cmd)
1939 {
1940 unreachable("should never end up here");
1941 return 0;
1942 }
1943
1944 uint32_t
1945 _mesa_unmarshal_MultiModeDrawArraysIBM(struct gl_context *ctx,
1946 const struct marshal_cmd_MultiModeDrawArraysIBM *cmd)
1947 {
1948 unreachable("should never end up here");
1949 return 0;
1950 }
1951
1952 uint32_t
1953 _mesa_unmarshal_MultiModeDrawElementsIBM(struct gl_context *ctx,
1954 const struct marshal_cmd_MultiModeDrawElementsIBM *cmd)
1955 {
1956 unreachable("should never end up here");
1957 return 0;
1958 }
1959
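/* Conversely, the *UserBuf, *Packed and *DrawID commands are internal to
 * glthread: they are produced directly by the marshal layer, so their
 * application-facing entry points below should never be called.
 */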
1960 void GLAPIENTRY
1961 _mesa_marshal_DrawArraysUserBuf(void)
1962 {
1963 unreachable("should never end up here");
1964 }
1965
1966 void GLAPIENTRY
1967 _mesa_marshal_DrawElementsUserBuf(const GLvoid *cmd)
1968 {
1969 unreachable("should never end up here");
1970 }
1971
1972 void GLAPIENTRY
1973 _mesa_marshal_DrawElementsUserBufPacked(const GLvoid *cmd)
1974 {
1975 unreachable("should never end up here");
1976 }
1977
1978 void GLAPIENTRY
1979 _mesa_marshal_MultiDrawArraysUserBuf(void)
1980 {
1981 unreachable("should never end up here");
1982 }
1983
1984 void GLAPIENTRY
1985 _mesa_marshal_MultiDrawElementsUserBuf(GLintptr indexBuf, GLenum mode,
1986 const GLsizei *count, GLenum type,
1987 const GLvoid * const *indices,
1988 GLsizei primcount,
1989 const GLint *basevertex)
1990 {
1991 unreachable("should never end up here");
1992 }
1993
1994 void GLAPIENTRY
1995 _mesa_marshal_DrawArraysInstancedBaseInstanceDrawID(void)
1996 {
1997 unreachable("should never end up here");
1998 }
1999
2000 void GLAPIENTRY _mesa_marshal_DrawElementsPacked(GLenum mode, GLenum type,
2001 GLushort count, GLushort indices)
2002 {
2003 unreachable("should never end up here");
2004 }
2005
2006 void GLAPIENTRY
2007 _mesa_marshal_DrawElementsInstancedBaseVertexBaseInstanceDrawID(GLenum mode, GLsizei count,
2008 GLenum type, const GLvoid *indices,
2009 GLsizei instance_count, GLint basevertex,
2010 GLuint baseinstance, GLuint drawid)
2011 {
2012 unreachable("should never end up here");
2013 }
2014
2015 void GLAPIENTRY
2016 _mesa_DrawArraysUserBuf(void)
2017 {
2018 unreachable("should never end up here");
2019 }
2020
2021 void GLAPIENTRY
2022 _mesa_MultiDrawArraysUserBuf(void)
2023 {
2024 unreachable("should never end up here");
2025 }
2026
2027 void GLAPIENTRY
2028 _mesa_DrawArraysInstancedBaseInstanceDrawID(void)
2029 {
2030 unreachable("should never end up here");
2031 }
2032
2033 void GLAPIENTRY _mesa_DrawElementsPacked(GLenum mode, GLenum type,
2034 GLushort count, GLushort indices)
2035 {
2036 unreachable("should never end up here");
2037 }
2038
2039 void GLAPIENTRY
2040 _mesa_DrawElementsInstancedBaseVertexBaseInstanceDrawID(GLenum mode, GLsizei count,
2041 GLenum type, const GLvoid *indices,
2042 GLsizei instance_count, GLint basevertex,
2043 GLuint baseinstance, GLuint drawid)
2044 {
2045 unreachable("should never end up here");
2046 }
2047
2048 uint32_t
2049 _mesa_unmarshal_PushMatrix(struct gl_context *ctx,
2050 const struct marshal_cmd_PushMatrix *restrict cmd)
2051 {
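   /* All sizes below are in 8-byte command-buffer slots, matching the units
    * returned by the unmarshal functions; the hardcoded values for
    * PushMatrix, MultMatrixf and PopMatrix are assumed to match their
    * marshal_cmd struct sizes.
    */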
2052 const unsigned push_matrix_size = 1;
2053 const unsigned mult_matrixf_size = 9;
2054 const unsigned draw_elements_size =
2055 (align(sizeof(struct marshal_cmd_DrawElements), 8) / 8);
2056 const unsigned draw_elements_packed_size =
2057 (align(sizeof(struct marshal_cmd_DrawElementsPacked), 8) / 8);
2058 const unsigned pop_matrix_size = 1;
2059 uint64_t *next1 = _mesa_glthread_next_cmd((uint64_t *)cmd, push_matrix_size);
2060 uint64_t *next2;
2061
2062 /* Viewperf has these call patterns. */
2063 switch (_mesa_glthread_get_cmd(next1)->cmd_id) {
2064 case DISPATCH_CMD_DrawElements:
2065 /* Execute this sequence:
2066 * glPushMatrix
2067 * (glMultMatrixf with identity is eliminated by the marshal function)
2068 * glDrawElements
2069 * glPopMatrix
2070 * as:
2071 * glDrawElements
2072 */
2073 next2 = _mesa_glthread_next_cmd(next1, draw_elements_size);
2074
2075 if (_mesa_glthread_get_cmd(next2)->cmd_id == DISPATCH_CMD_PopMatrix) {
2076 /* The beauty of this is that this is inlined. */
2077 _mesa_unmarshal_DrawElements(ctx, (void*)next1);
2078 return push_matrix_size + draw_elements_size + pop_matrix_size;
2079 }
2080 break;
2081
2082 case DISPATCH_CMD_DrawElementsPacked:
2083 next2 = _mesa_glthread_next_cmd(next1, draw_elements_packed_size);
2084
2085 if (_mesa_glthread_get_cmd(next2)->cmd_id == DISPATCH_CMD_PopMatrix) {
2086 /* The beauty of this is that this is inlined. */
2087 _mesa_unmarshal_DrawElementsPacked(ctx, (void*)next1);
2088 return push_matrix_size + draw_elements_packed_size + pop_matrix_size;
2089 }
2090 break;
2091
2092 case DISPATCH_CMD_MultMatrixf:
2093 /* Skip this sequence:
2094 * glPushMatrix
2095 * glMultMatrixf
2096 * glPopMatrix
2097 */
2098 next2 = _mesa_glthread_next_cmd(next1, mult_matrixf_size);
2099
2100 if (_mesa_glthread_get_cmd(next2)->cmd_id == DISPATCH_CMD_PopMatrix)
2101 return push_matrix_size + mult_matrixf_size + pop_matrix_size;
2102 break;
2103 }
2104
2105 CALL_PushMatrix(ctx->Dispatch.Current, ());
2106 return push_matrix_size;
2107 }
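/* For reference, the application-side sequence that the DrawElements case
 * above collapses looks roughly like:
 *
 *    glPushMatrix();
 *    glMultMatrixf(identity);   (already eliminated by the marshal layer)
 *    glDrawElements(...);
 *    glPopMatrix();
 *
 * and only the glDrawElements call reaches the driver.
 */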
2108
2109 void GLAPIENTRY
2110 _mesa_marshal_PushMatrix(void)
2111 {
2112 GET_CURRENT_CONTEXT(ctx);
2113
2114 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_PushMatrix,
2115 sizeof(struct marshal_cmd_PushMatrix));
2116 _mesa_glthread_PushMatrix(ctx);
2117 }
2118