/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* Draw function marshalling for glthread.
 *
 * The purpose of these glDraw wrappers is to upload non-VBO vertex and
 * index data, so that glthread doesn't have to execute synchronously.
 */
30 #include "c99_alloca.h"
31
32 #include "main/glthread_marshal.h"
33 #include "main/dispatch.h"
34 #include "main/varray.h"
35
36 static inline unsigned
get_index_size(GLenum type)37 get_index_size(GLenum type)
38 {
39 /* GL_UNSIGNED_BYTE - GL_UNSIGNED_BYTE = 0
40 * GL_UNSIGNED_SHORT - GL_UNSIGNED_BYTE = 2
41 * GL_UNSIGNED_INT - GL_UNSIGNED_BYTE = 4
42 *
43 * Divide by 2 to get n=0,1,2, then the index size is: 1 << n
44 */
45 return 1 << ((type - GL_UNSIGNED_BYTE) >> 1);
46 }
47
48 static inline bool
is_index_type_valid(GLenum type)49 is_index_type_valid(GLenum type)
50 {
51 /* GL_UNSIGNED_BYTE = 0x1401
52 * GL_UNSIGNED_SHORT = 0x1403
53 * GL_UNSIGNED_INT = 0x1405
54 *
55 * The trick is that bit 1 and bit 2 mean USHORT and UINT, respectively.
56 * After clearing those two bits (with ~6), we should get UBYTE.
57 * Both bits can't be set, because the enum would be greater than UINT.
58 */
59 return type <= GL_UNSIGNED_INT && (type & ~6) == GL_UNSIGNED_BYTE;
60 }
61
62 static ALWAYS_INLINE struct gl_buffer_object *
upload_indices(struct gl_context * ctx,unsigned count,unsigned index_size,const GLvoid ** indices)63 upload_indices(struct gl_context *ctx, unsigned count, unsigned index_size,
64 const GLvoid **indices)
65 {
66 struct gl_buffer_object *upload_buffer = NULL;
67 unsigned upload_offset = 0;
68
69 assert(count);
70
71 _mesa_glthread_upload(ctx, *indices, index_size * count,
72 &upload_offset, &upload_buffer, NULL);
73 assert(upload_buffer);
74 *indices = (const GLvoid*)(intptr_t)upload_offset;
75
76 return upload_buffer;
77 }
78
79 static ALWAYS_INLINE struct gl_buffer_object *
upload_multi_indices(struct gl_context * ctx,unsigned total_count,unsigned index_size,unsigned draw_count,const GLsizei * count,const GLvoid * const * indices,const GLvoid ** out_indices)80 upload_multi_indices(struct gl_context *ctx, unsigned total_count,
81 unsigned index_size, unsigned draw_count,
82 const GLsizei *count, const GLvoid *const *indices,
83 const GLvoid **out_indices)
84 {
85 struct gl_buffer_object *upload_buffer = NULL;
86 unsigned upload_offset = 0;
87 uint8_t *upload_ptr = NULL;
88
89 assert(total_count);
90
91 _mesa_glthread_upload(ctx, NULL, index_size * total_count,
92 &upload_offset, &upload_buffer, &upload_ptr);
93 assert(upload_buffer);
94
95 for (unsigned i = 0, offset = 0; i < draw_count; i++) {
96 if (count[i] == 0)
97 continue;
98
99 unsigned size = count[i] * index_size;
100
101 memcpy(upload_ptr + offset, indices[i], size);
102 out_indices[i] = (const GLvoid*)(intptr_t)(upload_offset + offset);
103 offset += size;
104 }
105
106 return upload_buffer;
107 }
108
109 static ALWAYS_INLINE bool
upload_vertices(struct gl_context * ctx,unsigned user_buffer_mask,unsigned start_vertex,unsigned num_vertices,unsigned start_instance,unsigned num_instances,struct glthread_attrib_binding * buffers)110 upload_vertices(struct gl_context *ctx, unsigned user_buffer_mask,
111 unsigned start_vertex, unsigned num_vertices,
112 unsigned start_instance, unsigned num_instances,
113 struct glthread_attrib_binding *buffers)
114 {
115 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
116 unsigned attrib_mask_iter = vao->Enabled;
117 unsigned num_buffers = 0;
118
119 assert((num_vertices || !(user_buffer_mask & ~vao->NonZeroDivisorMask)) &&
120 (num_instances || !(user_buffer_mask & vao->NonZeroDivisorMask)));
121
122 if (unlikely(vao->BufferInterleaved & user_buffer_mask)) {
123 /* Slower upload path where some buffers reference multiple attribs,
124 * so we have to use 2 while loops instead of 1.
125 */
126 unsigned start_offset[VERT_ATTRIB_MAX];
127 unsigned end_offset[VERT_ATTRIB_MAX];
128 uint32_t buffer_mask = 0;
129
130 while (attrib_mask_iter) {
131 unsigned i = u_bit_scan(&attrib_mask_iter);
132 unsigned binding_index = vao->Attrib[i].BufferIndex;
133
134 if (!(user_buffer_mask & (1 << binding_index)))
135 continue;
136
137 unsigned stride = vao->Attrib[binding_index].Stride;
138 unsigned instance_div = vao->Attrib[binding_index].Divisor;
139 unsigned element_size = vao->Attrib[i].ElementSize;
140 unsigned offset = vao->Attrib[i].RelativeOffset;
141 unsigned size;
142
143 if (instance_div) {
144 /* Per-instance attrib. */
145
146 /* Figure out how many instances we'll render given instance_div. We
147 * can't use the typical div_round_up() pattern because the CTS uses
148 * instance_div = ~0 for a test, which overflows div_round_up()'s
149 * addition.
150 */
151 unsigned count = num_instances / instance_div;
152 if (count * instance_div != num_instances)
153 count++;
154
155 offset += stride * start_instance;
156 size = stride * (count - 1) + element_size;
157 } else {
158 /* Per-vertex attrib. */
159 offset += stride * start_vertex;
160 size = stride * (num_vertices - 1) + element_size;
161 }
162
163 unsigned binding_index_bit = 1u << binding_index;
164
165 /* Update upload offsets. */
166 if (!(buffer_mask & binding_index_bit)) {
167 start_offset[binding_index] = offset;
168 end_offset[binding_index] = offset + size;
169 } else {
170 if (offset < start_offset[binding_index])
171 start_offset[binding_index] = offset;
172 if (offset + size > end_offset[binding_index])
173 end_offset[binding_index] = offset + size;
174 }
175
176 buffer_mask |= binding_index_bit;
177 }
178
179 /* Upload buffers. */
180 while (buffer_mask) {
181 struct gl_buffer_object *upload_buffer = NULL;
182 unsigned upload_offset = 0;
183 unsigned start, end;
184
185 unsigned binding_index = u_bit_scan(&buffer_mask);
186
187 start = start_offset[binding_index];
188 end = end_offset[binding_index];
189 assert(start < end);
190
191 const void *ptr = vao->Attrib[binding_index].Pointer;
192 _mesa_glthread_upload(ctx, (uint8_t*)ptr + start,
193 end - start, &upload_offset,
194 &upload_buffer, NULL);
195 assert(upload_buffer);
196
197 buffers[num_buffers].buffer = upload_buffer;
198 buffers[num_buffers].offset = upload_offset - start;
199 buffers[num_buffers].original_pointer = ptr;
200 num_buffers++;
201 }
202
203 return true;
204 }
205
206 /* Faster path where all attribs are separate. */
207 while (attrib_mask_iter) {
208 unsigned i = u_bit_scan(&attrib_mask_iter);
209 unsigned binding_index = vao->Attrib[i].BufferIndex;
210
211 if (!(user_buffer_mask & (1 << binding_index)))
212 continue;
213
214 struct gl_buffer_object *upload_buffer = NULL;
215 unsigned upload_offset = 0;
216 unsigned stride = vao->Attrib[binding_index].Stride;
217 unsigned instance_div = vao->Attrib[binding_index].Divisor;
218 unsigned element_size = vao->Attrib[i].ElementSize;
219 unsigned offset = vao->Attrib[i].RelativeOffset;
220 unsigned size;
221
222 if (instance_div) {
223 /* Per-instance attrib. */
224
225 /* Figure out how many instances we'll render given instance_div. We
226 * can't use the typical div_round_up() pattern because the CTS uses
227 * instance_div = ~0 for a test, which overflows div_round_up()'s
228 * addition.
229 */
230 unsigned count = num_instances / instance_div;
231 if (count * instance_div != num_instances)
232 count++;
233
234 offset += stride * start_instance;
235 size = stride * (count - 1) + element_size;
236 } else {
237 /* Per-vertex attrib. */
238 offset += stride * start_vertex;
239 size = stride * (num_vertices - 1) + element_size;
240 }
241
242 const void *ptr = vao->Attrib[binding_index].Pointer;
243 _mesa_glthread_upload(ctx, (uint8_t*)ptr + offset,
244 size, &upload_offset, &upload_buffer, NULL);
245 assert(upload_buffer);
246
247 buffers[num_buffers].buffer = upload_buffer;
248 buffers[num_buffers].offset = upload_offset - offset;
249 buffers[num_buffers].original_pointer = ptr;
250 num_buffers++;
251 }
252
253 return true;
254 }
255
/* Generic DrawArrays structure NOT supporting user buffers. Ignore the name. */
struct marshal_cmd_DrawArrays
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLint first;
   GLsizei count;
   GLsizei instance_count;
   GLuint baseinstance;
};
266
267 uint32_t
_mesa_unmarshal_DrawArrays(struct gl_context * ctx,const struct marshal_cmd_DrawArrays * cmd,const uint64_t * last)268 _mesa_unmarshal_DrawArrays(struct gl_context *ctx,
269 const struct marshal_cmd_DrawArrays *cmd,
270 const uint64_t *last)
271 {
272 /* Ignore the function name. We use DISPATCH_CMD_DrawArrays
273 * for all DrawArrays variants without user buffers, and
274 * DISPATCH_CMD_DrawArraysInstancedBaseInstance for all DrawArrays
275 * variants with user buffrs.
276 */
277 const GLenum mode = cmd->mode;
278 const GLint first = cmd->first;
279 const GLsizei count = cmd->count;
280 const GLsizei instance_count = cmd->instance_count;
281 const GLuint baseinstance = cmd->baseinstance;
282
283 CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
284 (mode, first, count, instance_count,
285 baseinstance));
286 return cmd->cmd_base.cmd_size;
287 }
288
289 static ALWAYS_INLINE void
draw_arrays_async(struct gl_context * ctx,GLenum mode,GLint first,GLsizei count,GLsizei instance_count,GLuint baseinstance)290 draw_arrays_async(struct gl_context *ctx, GLenum mode, GLint first,
291 GLsizei count, GLsizei instance_count, GLuint baseinstance)
292 {
293 int cmd_size = sizeof(struct marshal_cmd_DrawArrays);
294 struct marshal_cmd_DrawArrays *cmd =
295 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArrays, cmd_size);
296
297 cmd->mode = mode;
298 cmd->first = first;
299 cmd->count = count;
300 cmd->instance_count = instance_count;
301 cmd->baseinstance = baseinstance;
302 }
303
/* Generic DrawArrays structure supporting user buffers. Ignore the name.
 * When user_buffer_mask != 0, an array of glthread_attrib_binding entries
 * (one per set bit) immediately follows this struct in the queue.
 */
struct marshal_cmd_DrawArraysInstancedBaseInstance
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLint first;
   GLsizei count;
   GLsizei instance_count;
   GLuint baseinstance;
   GLuint user_buffer_mask;
};
315
316 uint32_t
_mesa_unmarshal_DrawArraysInstancedBaseInstance(struct gl_context * ctx,const struct marshal_cmd_DrawArraysInstancedBaseInstance * cmd,const uint64_t * last)317 _mesa_unmarshal_DrawArraysInstancedBaseInstance(struct gl_context *ctx,
318 const struct marshal_cmd_DrawArraysInstancedBaseInstance *cmd,
319 const uint64_t *last)
320 {
321 /* Ignore the function name. We use DISPATCH_CMD_DrawArrays
322 * for all DrawArrays variants without user buffers, and
323 * DISPATCH_CMD_DrawArraysInstancedBaseInstance for all DrawArrays
324 * variants with user buffrs.
325 */
326 const GLenum mode = cmd->mode;
327 const GLint first = cmd->first;
328 const GLsizei count = cmd->count;
329 const GLsizei instance_count = cmd->instance_count;
330 const GLuint baseinstance = cmd->baseinstance;
331 const GLuint user_buffer_mask = cmd->user_buffer_mask;
332 const struct glthread_attrib_binding *buffers =
333 (const struct glthread_attrib_binding *)(cmd + 1);
334
335 /* Bind uploaded buffers if needed. */
336 if (user_buffer_mask) {
337 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
338 false);
339 }
340
341 CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
342 (mode, first, count, instance_count,
343 baseinstance));
344
345 /* Restore states. */
346 if (user_buffer_mask) {
347 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
348 true);
349 }
350 return cmd->cmd_base.cmd_size;
351 }
352
353 static ALWAYS_INLINE void
draw_arrays_async_user(struct gl_context * ctx,GLenum mode,GLint first,GLsizei count,GLsizei instance_count,GLuint baseinstance,unsigned user_buffer_mask,const struct glthread_attrib_binding * buffers)354 draw_arrays_async_user(struct gl_context *ctx, GLenum mode, GLint first,
355 GLsizei count, GLsizei instance_count, GLuint baseinstance,
356 unsigned user_buffer_mask,
357 const struct glthread_attrib_binding *buffers)
358 {
359 int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
360 int cmd_size = sizeof(struct marshal_cmd_DrawArraysInstancedBaseInstance) +
361 buffers_size;
362 struct marshal_cmd_DrawArraysInstancedBaseInstance *cmd;
363
364 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysInstancedBaseInstance,
365 cmd_size);
366 cmd->mode = mode;
367 cmd->first = first;
368 cmd->count = count;
369 cmd->instance_count = instance_count;
370 cmd->baseinstance = baseinstance;
371 cmd->user_buffer_mask = user_buffer_mask;
372
373 if (user_buffer_mask)
374 memcpy(cmd + 1, buffers, buffers_size);
375 }
376
377 static ALWAYS_INLINE void
draw_arrays(GLenum mode,GLint first,GLsizei count,GLsizei instance_count,GLuint baseinstance,bool compiled_into_dlist)378 draw_arrays(GLenum mode, GLint first, GLsizei count, GLsizei instance_count,
379 GLuint baseinstance, bool compiled_into_dlist)
380 {
381 GET_CURRENT_CONTEXT(ctx);
382
383 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
384 unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
385
386 if (compiled_into_dlist && ctx->GLThread.ListMode) {
387 _mesa_glthread_finish_before(ctx, "DrawArrays");
388 /* Use the function that's compiled into a display list. */
389 CALL_DrawArrays(ctx->CurrentServerDispatch, (mode, first, count));
390 return;
391 }
392
393 /* Fast path when nothing needs to be done.
394 *
395 * This is also an error path. Zero counts should still call the driver
396 * for possible GL errors.
397 */
398 if (ctx->API == API_OPENGL_CORE || !user_buffer_mask ||
399 count <= 0 || instance_count <= 0) {
400 draw_arrays_async(ctx, mode, first, count, instance_count, baseinstance);
401 return;
402 }
403
404 /* Upload and draw. */
405 struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
406 if (!ctx->GLThread.SupportsNonVBOUploads ||
407 !upload_vertices(ctx, user_buffer_mask, first, count, baseinstance,
408 instance_count, buffers)) {
409 _mesa_glthread_finish_before(ctx, "DrawArrays");
410 CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
411 (mode, first, count, instance_count,
412 baseinstance));
413 return;
414 }
415
416 draw_arrays_async_user(ctx, mode, first, count, instance_count, baseinstance,
417 user_buffer_mask, buffers);
418 }
419
/* MultiDrawArrays command. Followed in the queue by first[draw_count],
 * count[draw_count], and — when user_buffer_mask != 0 — the uploaded
 * buffer bindings.
 */
struct marshal_cmd_MultiDrawArrays
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLsizei draw_count;
   GLuint user_buffer_mask;
};
427
428 uint32_t
_mesa_unmarshal_MultiDrawArrays(struct gl_context * ctx,const struct marshal_cmd_MultiDrawArrays * cmd,const uint64_t * last)429 _mesa_unmarshal_MultiDrawArrays(struct gl_context *ctx,
430 const struct marshal_cmd_MultiDrawArrays *cmd,
431 const uint64_t *last)
432 {
433 const GLenum mode = cmd->mode;
434 const GLsizei draw_count = cmd->draw_count;
435 const GLuint user_buffer_mask = cmd->user_buffer_mask;
436
437 const char *variable_data = (const char *)(cmd + 1);
438 const GLint *first = (GLint *)variable_data;
439 variable_data += sizeof(GLint) * draw_count;
440 const GLsizei *count = (GLsizei *)variable_data;
441 variable_data += sizeof(GLsizei) * draw_count;
442 const struct glthread_attrib_binding *buffers =
443 (const struct glthread_attrib_binding *)variable_data;
444
445 /* Bind uploaded buffers if needed. */
446 if (user_buffer_mask) {
447 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
448 false);
449 }
450
451 CALL_MultiDrawArrays(ctx->CurrentServerDispatch,
452 (mode, first, count, draw_count));
453
454 /* Restore states. */
455 if (user_buffer_mask) {
456 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
457 true);
458 }
459 return cmd->cmd_base.cmd_size;
460 }
461
462 static ALWAYS_INLINE bool
multi_draw_arrays_async(struct gl_context * ctx,GLenum mode,const GLint * first,const GLsizei * count,GLsizei draw_count,unsigned user_buffer_mask,const struct glthread_attrib_binding * buffers)463 multi_draw_arrays_async(struct gl_context *ctx, GLenum mode,
464 const GLint *first, const GLsizei *count,
465 GLsizei draw_count, unsigned user_buffer_mask,
466 const struct glthread_attrib_binding *buffers)
467 {
468 int first_size = sizeof(GLint) * draw_count;
469 int count_size = sizeof(GLsizei) * draw_count;
470 int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
471 int cmd_size = sizeof(struct marshal_cmd_MultiDrawArrays) +
472 first_size + count_size + buffers_size;
473 struct marshal_cmd_MultiDrawArrays *cmd;
474
475 /* Make sure cmd can fit the queue buffer */
476 if (cmd_size > MARSHAL_MAX_CMD_SIZE) {
477 return false;
478 }
479
480 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawArrays,
481 cmd_size);
482 cmd->mode = mode;
483 cmd->draw_count = draw_count;
484 cmd->user_buffer_mask = user_buffer_mask;
485
486 char *variable_data = (char*)(cmd + 1);
487 memcpy(variable_data, first, first_size);
488 variable_data += first_size;
489 memcpy(variable_data, count, count_size);
490
491 if (user_buffer_mask) {
492 variable_data += count_size;
493 memcpy(variable_data, buffers, buffers_size);
494 }
495
496 return true;
497 }
498
499 void GLAPIENTRY
_mesa_marshal_MultiDrawArrays(GLenum mode,const GLint * first,const GLsizei * count,GLsizei draw_count)500 _mesa_marshal_MultiDrawArrays(GLenum mode, const GLint *first,
501 const GLsizei *count, GLsizei draw_count)
502 {
503 GET_CURRENT_CONTEXT(ctx);
504
505 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
506 unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
507
508 if (ctx->GLThread.ListMode)
509 goto sync;
510
511 if (draw_count >= 0 &&
512 (ctx->API == API_OPENGL_CORE || !user_buffer_mask) &&
513 multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL)) {
514 return;
515 }
516
517 /* If the draw count is too high or negative, the queue can't be used. */
518 if (!ctx->GLThread.SupportsNonVBOUploads ||
519 draw_count < 0 || draw_count > MARSHAL_MAX_CMD_SIZE / 16)
520 goto sync;
521
522 unsigned min_index = ~0;
523 unsigned max_index_exclusive = 0;
524
525 for (unsigned i = 0; i < draw_count; i++) {
526 GLsizei vertex_count = count[i];
527
528 if (vertex_count < 0) {
529 /* Just call the driver to set the error. */
530 multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL);
531 return;
532 }
533 if (vertex_count == 0)
534 continue;
535
536 min_index = MIN2(min_index, first[i]);
537 max_index_exclusive = MAX2(max_index_exclusive, first[i] + vertex_count);
538 }
539
540 unsigned num_vertices = max_index_exclusive - min_index;
541 if (num_vertices == 0) {
542 /* Nothing to do, but call the driver to set possible GL errors. */
543 multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL);
544 return;
545 }
546
547 /* Upload and draw. */
548 struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
549 if (!upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
550 0, 1, buffers))
551 goto sync;
552
553 multi_draw_arrays_async(ctx, mode, first, count, draw_count,
554 user_buffer_mask, buffers);
555 return;
556
557 sync:
558 _mesa_glthread_finish_before(ctx, "MultiDrawArrays");
559 CALL_MultiDrawArrays(ctx->CurrentServerDispatch,
560 (mode, first, count, draw_count));
561 }
562
/* DrawElementsInstancedBaseVertexBaseInstance not supporting user buffers.
 * Ignore the name.
 */
struct marshal_cmd_DrawElementsInstancedARB
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLsizei instance_count;
   GLint basevertex;
   GLuint baseinstance;
   const GLvoid *indices;
};
577
578 uint32_t
_mesa_unmarshal_DrawElementsInstancedARB(struct gl_context * ctx,const struct marshal_cmd_DrawElementsInstancedARB * cmd,const uint64_t * last)579 _mesa_unmarshal_DrawElementsInstancedARB(struct gl_context *ctx,
580 const struct marshal_cmd_DrawElementsInstancedARB *cmd,
581 const uint64_t *last)
582 {
583 /* Ignore the function name. We use DISPATCH_CMD_DrawElementsInstanced-
584 * BaseVertexBaseInstance for all DrawElements variants with user buffers,
585 * and both DISPATCH_CMD_DrawElementsInstancedARB and DISPATCH_CMD_Draw-
586 * RangeElementsBaseVertex for all draw elements variants without user
587 * buffers.
588 */
589 const GLenum mode = cmd->mode;
590 const GLsizei count = cmd->count;
591 const GLenum type = cmd->type;
592 const GLvoid *indices = cmd->indices;
593 const GLsizei instance_count = cmd->instance_count;
594 const GLint basevertex = cmd->basevertex;
595 const GLuint baseinstance = cmd->baseinstance;
596
597 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
598 (mode, count, type, indices,
599 instance_count, basevertex,
600 baseinstance));
601 return cmd->cmd_base.cmd_size;
602 }
603
/* DrawRangeElementsBaseVertex command (no user buffers); used when the
 * index bounds are known.
 */
struct marshal_cmd_DrawRangeElementsBaseVertex
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLint basevertex;
   GLuint min_index;
   GLuint max_index;
   const GLvoid *indices;
};
615
616 uint32_t
_mesa_unmarshal_DrawRangeElementsBaseVertex(struct gl_context * ctx,const struct marshal_cmd_DrawRangeElementsBaseVertex * cmd,const uint64_t * last)617 _mesa_unmarshal_DrawRangeElementsBaseVertex(struct gl_context *ctx,
618 const struct marshal_cmd_DrawRangeElementsBaseVertex *cmd,
619 const uint64_t *last)
620 {
621 const GLenum mode = cmd->mode;
622 const GLsizei count = cmd->count;
623 const GLenum type = cmd->type;
624 const GLvoid *indices = cmd->indices;
625 const GLint basevertex = cmd->basevertex;
626 const GLuint min_index = cmd->min_index;
627 const GLuint max_index = cmd->max_index;
628
629 CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
630 (mode, min_index, max_index, count,
631 type, indices, basevertex));
632 return cmd->cmd_base.cmd_size;
633 }
634
635 static ALWAYS_INLINE void
draw_elements_async(struct gl_context * ctx,GLenum mode,GLsizei count,GLenum type,const GLvoid * indices,GLsizei instance_count,GLint basevertex,GLuint baseinstance,bool index_bounds_valid,GLuint min_index,GLuint max_index)636 draw_elements_async(struct gl_context *ctx, GLenum mode, GLsizei count,
637 GLenum type, const GLvoid *indices, GLsizei instance_count,
638 GLint basevertex, GLuint baseinstance,
639 bool index_bounds_valid, GLuint min_index, GLuint max_index)
640 {
641 if (index_bounds_valid) {
642 int cmd_size = sizeof(struct marshal_cmd_DrawRangeElementsBaseVertex);
643 struct marshal_cmd_DrawRangeElementsBaseVertex *cmd =
644 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawRangeElementsBaseVertex, cmd_size);
645
646 cmd->mode = mode;
647 cmd->count = count;
648 cmd->type = type;
649 cmd->indices = indices;
650 cmd->basevertex = basevertex;
651 cmd->min_index = min_index;
652 cmd->max_index = max_index;
653 } else {
654 int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedARB);
655 struct marshal_cmd_DrawElementsInstancedARB *cmd =
656 _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedARB, cmd_size);
657
658 cmd->mode = mode;
659 cmd->count = count;
660 cmd->type = type;
661 cmd->indices = indices;
662 cmd->instance_count = instance_count;
663 cmd->basevertex = basevertex;
664 cmd->baseinstance = baseinstance;
665 }
666 }
667
/* Generic DrawElements command supporting user buffers. When
 * user_buffer_mask != 0, the uploaded vertex-buffer bindings immediately
 * follow this struct in the queue.
 */
struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
{
   struct marshal_cmd_base cmd_base;
   bool index_bounds_valid;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLsizei instance_count;
   GLint basevertex;
   GLuint baseinstance;
   GLuint min_index;
   GLuint max_index;
   GLuint user_buffer_mask;
   const GLvoid *indices;
   struct gl_buffer_object *index_buffer;
};
684
685 uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstance(struct gl_context * ctx,const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance * cmd,const uint64_t * last)686 _mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstance(struct gl_context *ctx,
687 const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance *cmd,
688 const uint64_t *last)
689 {
690 /* Ignore the function name. We use DISPATCH_CMD_DrawElementsInstanced-
691 * BaseVertexBaseInstance for all DrawElements variants with user buffers,
692 * and both DISPATCH_CMD_DrawElementsInstancedARB and DISPATCH_CMD_Draw-
693 * RangeElementsBaseVertex for all draw elements variants without user
694 * buffers.
695 */
696 const GLenum mode = cmd->mode;
697 const GLsizei count = cmd->count;
698 const GLenum type = cmd->type;
699 const GLvoid *indices = cmd->indices;
700 const GLsizei instance_count = cmd->instance_count;
701 const GLint basevertex = cmd->basevertex;
702 const GLuint baseinstance = cmd->baseinstance;
703 const GLuint min_index = cmd->min_index;
704 const GLuint max_index = cmd->max_index;
705 const GLuint user_buffer_mask = cmd->user_buffer_mask;
706 struct gl_buffer_object *index_buffer = cmd->index_buffer;
707 const struct glthread_attrib_binding *buffers =
708 (const struct glthread_attrib_binding *)(cmd + 1);
709
710 /* Bind uploaded buffers if needed. */
711 if (user_buffer_mask) {
712 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
713 false);
714 }
715 if (index_buffer) {
716 _mesa_InternalBindElementBuffer(ctx, index_buffer);
717 }
718
719 /* Draw. */
720 if (cmd->index_bounds_valid && instance_count == 1 && baseinstance == 0) {
721 CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
722 (mode, min_index, max_index, count,
723 type, indices, basevertex));
724 } else {
725 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
726 (mode, count, type, indices,
727 instance_count, basevertex,
728 baseinstance));
729 }
730
731 /* Restore states. */
732 if (index_buffer) {
733 _mesa_InternalBindElementBuffer(ctx, NULL);
734 }
735 if (user_buffer_mask) {
736 _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
737 true);
738 }
739 return cmd->cmd_base.cmd_size;
740 }
741
742 static ALWAYS_INLINE void
draw_elements_async_user(struct gl_context * ctx,GLenum mode,GLsizei count,GLenum type,const GLvoid * indices,GLsizei instance_count,GLint basevertex,GLuint baseinstance,bool index_bounds_valid,GLuint min_index,GLuint max_index,struct gl_buffer_object * index_buffer,unsigned user_buffer_mask,const struct glthread_attrib_binding * buffers)743 draw_elements_async_user(struct gl_context *ctx, GLenum mode, GLsizei count,
744 GLenum type, const GLvoid *indices, GLsizei instance_count,
745 GLint basevertex, GLuint baseinstance,
746 bool index_bounds_valid, GLuint min_index, GLuint max_index,
747 struct gl_buffer_object *index_buffer,
748 unsigned user_buffer_mask,
749 const struct glthread_attrib_binding *buffers)
750 {
751 int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
752 int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance) +
753 buffers_size;
754 struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance *cmd;
755
756 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedBaseVertexBaseInstance, cmd_size);
757 cmd->mode = mode;
758 cmd->count = count;
759 cmd->type = type;
760 cmd->indices = indices;
761 cmd->instance_count = instance_count;
762 cmd->basevertex = basevertex;
763 cmd->baseinstance = baseinstance;
764 cmd->min_index = min_index;
765 cmd->max_index = max_index;
766 cmd->user_buffer_mask = user_buffer_mask;
767 cmd->index_bounds_valid = index_bounds_valid;
768 cmd->index_buffer = index_buffer;
769
770 if (user_buffer_mask)
771 memcpy(cmd + 1, buffers, buffers_size);
772 }
773
774 static void
draw_elements(GLenum mode,GLsizei count,GLenum type,const GLvoid * indices,GLsizei instance_count,GLint basevertex,GLuint baseinstance,bool index_bounds_valid,GLuint min_index,GLuint max_index,bool compiled_into_dlist)775 draw_elements(GLenum mode, GLsizei count, GLenum type, const GLvoid *indices,
776 GLsizei instance_count, GLint basevertex, GLuint baseinstance,
777 bool index_bounds_valid, GLuint min_index, GLuint max_index,
778 bool compiled_into_dlist)
779 {
780 GET_CURRENT_CONTEXT(ctx);
781
782 struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
783 unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
784 bool has_user_indices = vao->CurrentElementBufferName == 0;
785
786 if (compiled_into_dlist && ctx->GLThread.ListMode)
787 goto sync;
788
789 /* Fast path when nothing needs to be done.
790 *
791 * This is also an error path. Zero counts should still call the driver
792 * for possible GL errors.
793 */
794 if (ctx->API == API_OPENGL_CORE ||
795 count <= 0 || instance_count <= 0 || max_index < min_index ||
796 !is_index_type_valid(type) ||
797 (!user_buffer_mask && !has_user_indices)) {
798 draw_elements_async(ctx, mode, count, type, indices, instance_count,
799 basevertex, baseinstance, index_bounds_valid,
800 min_index, max_index);
801 return;
802 }
803
804 if (!ctx->GLThread.SupportsNonVBOUploads)
805 goto sync;
806
807 bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;
808 unsigned index_size = get_index_size(type);
809
810 if (need_index_bounds && !index_bounds_valid) {
811 /* Sync if indices come from a buffer and vertices come from memory
812 * and index bounds are not valid.
813 *
814 * We would have to map the indices to compute the index bounds, and
815 * for that we would have to sync anyway.
816 */
817 if (!has_user_indices)
818 goto sync;
819
820 /* Compute the index bounds. */
821 min_index = ~0;
822 max_index = 0;
823 vbo_get_minmax_index_mapped(count, index_size,
824 ctx->GLThread._RestartIndex[index_size - 1],
825 ctx->GLThread._PrimitiveRestart, indices,
826 &min_index, &max_index);
827 index_bounds_valid = true;
828 }
829
830 unsigned start_vertex = min_index + basevertex;
831 unsigned num_vertices = max_index + 1 - min_index;
832
833 /* If there is too much data to upload, sync and let the driver unroll
834 * indices. */
835 if (util_is_vbo_upload_ratio_too_large(count, num_vertices))
836 goto sync;
837
838 struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
839 if (user_buffer_mask &&
840 !upload_vertices(ctx, user_buffer_mask, start_vertex, num_vertices,
841 baseinstance, instance_count, buffers))
842 goto sync;
843
844 /* Upload indices. */
845 struct gl_buffer_object *index_buffer = NULL;
846 if (has_user_indices)
847 index_buffer = upload_indices(ctx, count, index_size, &indices);
848
849 /* Draw asynchronously. */
850 draw_elements_async_user(ctx, mode, count, type, indices, instance_count,
851 basevertex, baseinstance, index_bounds_valid,
852 min_index, max_index, index_buffer,
853 user_buffer_mask, buffers);
854 return;
855
856 sync:
857 _mesa_glthread_finish_before(ctx, "DrawElements");
858
859 if (compiled_into_dlist && ctx->GLThread.ListMode) {
860 /* Only use the ones that are compiled into display lists. */
861 if (basevertex) {
862 CALL_DrawElementsBaseVertex(ctx->CurrentServerDispatch,
863 (mode, count, type, indices, basevertex));
864 } else if (index_bounds_valid) {
865 CALL_DrawRangeElements(ctx->CurrentServerDispatch,
866 (mode, min_index, max_index, count, type, indices));
867 } else {
868 CALL_DrawElements(ctx->CurrentServerDispatch, (mode, count, type, indices));
869 }
870 } else if (index_bounds_valid && instance_count == 1 && baseinstance == 0) {
871 CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
872 (mode, min_index, max_index, count,
873 type, indices, basevertex));
874 } else {
875 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
876 (mode, count, type, indices,
877 instance_count, basevertex,
878 baseinstance));
879 }
880 }
881
/* Command payload for glMultiDrawElements / glMultiDrawElementsBaseVertex in
 * the glthread command queue.
 *
 * Variable-length data follows the struct in this order:
 * count[draw_count], indices[draw_count], basevertex[draw_count] (present
 * only when has_base_vertex), then one glthread_attrib_binding per bit set
 * in user_buffer_mask.  The writer is multi_draw_elements_async and the
 * reader is _mesa_unmarshal_MultiDrawElementsBaseVertex; their layouts must
 * stay in sync.
 */
struct marshal_cmd_MultiDrawElementsBaseVertex
{
   struct marshal_cmd_base cmd_base;
   bool has_base_vertex;     /* true if a basevertex[] array follows indices[] */
   GLenum mode;
   GLenum type;
   GLsizei draw_count;
   GLuint user_buffer_mask;  /* attribs whose user-pointer data was uploaded */
   struct gl_buffer_object *index_buffer; /* non-NULL if indices were uploaded */
};
892
/* Execute a marshalled glMultiDrawElements[BaseVertex] on the server thread.
 *
 * Decodes the variable-length payload appended to the command, temporarily
 * binds any uploaded vertex/index buffers, issues the draw, restores the
 * previous bindings, and returns the command size so the dispatcher can
 * advance to the next command.
 */
uint32_t
_mesa_unmarshal_MultiDrawElementsBaseVertex(struct gl_context *ctx,
                                            const struct marshal_cmd_MultiDrawElementsBaseVertex *cmd,
                                            const uint64_t *last)
{
   const GLenum mode = cmd->mode;
   const GLenum type = cmd->type;
   const GLsizei draw_count = cmd->draw_count;
   const GLuint user_buffer_mask = cmd->user_buffer_mask;
   struct gl_buffer_object *index_buffer = cmd->index_buffer;
   const bool has_base_vertex = cmd->has_base_vertex;

   /* Walk the variable-length payload right after the fixed header.  The
    * order (count, indices, optional basevertex, buffers) must match what
    * multi_draw_elements_async wrote.
    */
   const char *variable_data = (const char *)(cmd + 1);
   const GLsizei *count = (GLsizei *)variable_data;
   variable_data += sizeof(GLsizei) * draw_count;
   const GLvoid *const *indices = (const GLvoid *const *)variable_data;
   /* sizeof(const GLvoid *const *) is a pointer size, equal to the
    * sizeof(indices[0]) used on the marshalling side, so the offsets agree. */
   variable_data += sizeof(const GLvoid *const *) * draw_count;
   const GLsizei *basevertex = NULL;
   if (has_base_vertex) {
      basevertex = (GLsizei *)variable_data;
      variable_data += sizeof(GLsizei) * draw_count;
   }
   const struct glthread_attrib_binding *buffers =
      (const struct glthread_attrib_binding *)variable_data;

   /* Bind uploaded buffers if needed. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      false);
   }
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, index_buffer);
   }

   /* Draw. */
   if (has_base_vertex) {
      CALL_MultiDrawElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, count, type, indices, draw_count,
                                        basevertex));
   } else {
      CALL_MultiDrawElementsEXT(ctx->CurrentServerDispatch,
                                (mode, count, type, indices, draw_count));
   }

   /* Restore states (unbind in reverse order of binding). */
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, NULL);
   }
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      true);
   }
   return cmd->cmd_base.cmd_size;
}
947
948 static ALWAYS_INLINE bool
multi_draw_elements_async(struct gl_context * ctx,GLenum mode,const GLsizei * count,GLenum type,const GLvoid * const * indices,GLsizei draw_count,const GLsizei * basevertex,struct gl_buffer_object * index_buffer,unsigned user_buffer_mask,const struct glthread_attrib_binding * buffers)949 multi_draw_elements_async(struct gl_context *ctx, GLenum mode,
950 const GLsizei *count, GLenum type,
951 const GLvoid *const *indices, GLsizei draw_count,
952 const GLsizei *basevertex,
953 struct gl_buffer_object *index_buffer,
954 unsigned user_buffer_mask,
955 const struct glthread_attrib_binding *buffers)
956 {
957 int count_size = sizeof(GLsizei) * draw_count;
958 int indices_size = sizeof(indices[0]) * draw_count;
959 int basevertex_size = basevertex ? sizeof(GLsizei) * draw_count : 0;
960 int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
961 int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsBaseVertex) +
962 count_size + indices_size + basevertex_size + buffers_size;
963 struct marshal_cmd_MultiDrawElementsBaseVertex *cmd;
964
965 /* Make sure cmd can fit the queue buffer */
966 if (cmd_size > MARSHAL_MAX_CMD_SIZE) {
967 return false;
968 }
969
970 cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawElementsBaseVertex, cmd_size);
971 cmd->mode = mode;
972 cmd->type = type;
973 cmd->draw_count = draw_count;
974 cmd->user_buffer_mask = user_buffer_mask;
975 cmd->index_buffer = index_buffer;
976 cmd->has_base_vertex = basevertex != NULL;
977
978 char *variable_data = (char*)(cmd + 1);
979 memcpy(variable_data, count, count_size);
980 variable_data += count_size;
981 memcpy(variable_data, indices, indices_size);
982 variable_data += indices_size;
983
984 if (basevertex) {
985 memcpy(variable_data, basevertex, basevertex_size);
986 variable_data += basevertex_size;
987 }
988
989 if (user_buffer_mask)
990 memcpy(variable_data, buffers, buffers_size);
991
992 return true;
993 }
994
/* Application-thread entry point for glMultiDrawElements[BaseVertex]
 * (glMultiDrawElements routes here with basevertex == NULL).
 *
 * Tries to queue the call asynchronously, uploading any client-memory
 * vertex/index arrays first so the server thread never touches user
 * pointers.  Falls back to a synchronous call (goto sync) whenever the
 * data can't be uploaded safely or profitably.
 */
void GLAPIENTRY
_mesa_marshal_MultiDrawElementsBaseVertex(GLenum mode, const GLsizei *count,
                                          GLenum type,
                                          const GLvoid *const *indices,
                                          GLsizei draw_count,
                                          const GLsizei *basevertex)
{
   GET_CURRENT_CONTEXT(ctx);

   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   /* Enabled attribs that source from client memory (no VBO bound). */
   unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
   /* No element buffer bound -> indices come from client memory. */
   bool has_user_indices = vao->CurrentElementBufferName == 0;

   if (ctx->GLThread.ListMode)
      goto sync;

   /* Fast path when nothing needs to be done.
    * NOTE(review): the API_OPENGL_CORE / invalid-type cases presumably rely
    * on the driver rejecting client arrays / bad enums itself — confirm.
    */
   if (draw_count >= 0 &&
       (ctx->API == API_OPENGL_CORE ||
        !is_index_type_valid(type) ||
        (!user_buffer_mask && !has_user_indices))) {
      if (multi_draw_elements_async(ctx, mode, count, type, indices,
                                    draw_count, basevertex, NULL, 0, NULL))
         return;
   }

   /* Index bounds are needed to upload per-vertex (divisor 0) user arrays. */
   bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;

   /* If the draw count is too high or negative, the queue can't be used.
    *
    * Sync if indices come from a buffer and vertices come from memory
    * and index bounds are not valid. We would have to map the indices
    * to compute the index bounds, and for that we would have to sync anyway.
    */
   if (!ctx->GLThread.SupportsNonVBOUploads ||
       draw_count < 0 || draw_count > MARSHAL_MAX_CMD_SIZE / 32 ||
       (need_index_bounds && !has_user_indices))
      goto sync;

   unsigned index_size = get_index_size(type);
   unsigned min_index = ~0;
   unsigned max_index = 0;
   unsigned total_count = 0;
   unsigned num_vertices = 0;

   /* This is always true if there is per-vertex data that needs to be
    * uploaded.
    */
   if (need_index_bounds) {
      /* Compute the index bounds.
       * (draw_count >= 0 is guaranteed above, so the signed/unsigned
       * comparison in the loop condition is safe.)
       */
      for (unsigned i = 0; i < draw_count; i++) {
         GLsizei vertex_count = count[i];

         if (vertex_count < 0) {
            /* Just call the driver to set the error. */
            multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                      basevertex, NULL, 0, NULL);
            return;
         }
         if (vertex_count == 0)
            continue;

         unsigned min = ~0, max = 0;
         vbo_get_minmax_index_mapped(vertex_count, index_size,
                                     ctx->GLThread._RestartIndex[index_size - 1],
                                     ctx->GLThread._PrimitiveRestart, indices[i],
                                     &min, &max);
         /* Bounds are per-slice basevertex-adjusted so the union below
          * covers the actual fetched vertex range. */
         if (basevertex) {
            min += basevertex[i];
            max += basevertex[i];
         }
         min_index = MIN2(min_index, min);
         max_index = MAX2(max_index, max);
         total_count += vertex_count;
      }

      num_vertices = max_index + 1 - min_index;

      if (total_count == 0 || num_vertices == 0) {
         /* Nothing to do, but call the driver to set possible GL errors. */
         multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                   basevertex, NULL, 0, NULL);
         return;
      }

      /* If there is too much data to upload, sync and let the driver unroll
       * indices. */
      if (util_is_vbo_upload_ratio_too_large(total_count, num_vertices))
         goto sync;
   } else if (has_user_indices) {
      /* Only compute total_count for the upload of indices. */
      for (unsigned i = 0; i < draw_count; i++) {
         GLsizei vertex_count = count[i];

         if (vertex_count < 0) {
            /* Just call the driver to set the error. */
            multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                      basevertex, NULL, 0, NULL);
            return;
         }
         if (vertex_count == 0)
            continue;

         total_count += vertex_count;
      }

      if (total_count == 0) {
         /* Nothing to do, but call the driver to set possible GL errors. */
         multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                   basevertex, NULL, 0, NULL);
         return;
      }
   }

   /* Upload vertices. */
   struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
   if (user_buffer_mask &&
       !upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
                        0, 1, buffers))
      goto sync;

   /* Upload indices; out_indices receives the offsets of each slice within
    * the uploaded index buffer. */
   struct gl_buffer_object *index_buffer = NULL;
   if (has_user_indices) {
      const GLvoid **out_indices = alloca(sizeof(indices[0]) * draw_count);

      index_buffer = upload_multi_indices(ctx, total_count, index_size,
                                          draw_count, count, indices,
                                          out_indices);
      indices = out_indices;
   }

   /* Draw asynchronously. */
   multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                             basevertex, index_buffer, user_buffer_mask,
                             buffers);
   return;

sync:
   _mesa_glthread_finish_before(ctx, "DrawElements");

   if (basevertex) {
      CALL_MultiDrawElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, count, type, indices, draw_count,
                                        basevertex));
   } else {
      CALL_MultiDrawElementsEXT(ctx->CurrentServerDispatch,
                                (mode, count, type, indices, draw_count));
   }
}
1145
/* glDrawArrays: non-instanced (instance_count = 1, baseinstance = 0).
 * NOTE(review): the final bool presumably flags entry points compiled into
 * display lists (cf. compiled_into_dlist in draw_elements) — confirm
 * against draw_arrays. */
void GLAPIENTRY
_mesa_marshal_DrawArrays(GLenum mode, GLint first, GLsizei count)
{
   draw_arrays(mode, first, count, 1, 0, true);
}
1151
/* glDrawArraysInstanced: baseinstance = 0. */
void GLAPIENTRY
_mesa_marshal_DrawArraysInstancedARB(GLenum mode, GLint first, GLsizei count,
                                     GLsizei instance_count)
{
   draw_arrays(mode, first, count, instance_count, 0, false);
}
1158
/* glDrawArraysInstancedBaseInstance: the fully general array-draw form. */
void GLAPIENTRY
_mesa_marshal_DrawArraysInstancedBaseInstance(GLenum mode, GLint first,
                                              GLsizei count, GLsizei instance_count,
                                              GLuint baseinstance)
{
   draw_arrays(mode, first, count, instance_count, baseinstance, false);
}
1166
/* glDrawElements: instance_count = 1, basevertex = 0, baseinstance = 0,
 * index bounds unknown (false, 0, 0).
 * NOTE(review): the final bool presumably flags entry points compiled into
 * display lists (cf. compiled_into_dlist in draw_elements' sync path). */
void GLAPIENTRY
_mesa_marshal_DrawElements(GLenum mode, GLsizei count, GLenum type,
                           const GLvoid *indices)
{
   draw_elements(mode, count, type, indices, 1, 0, 0, false, 0, 0, true);
}
1173
/* glDrawRangeElements: like glDrawElements, but the caller supplies the
 * index bounds [start, end] (index_bounds_valid = true). */
void GLAPIENTRY
_mesa_marshal_DrawRangeElements(GLenum mode, GLuint start, GLuint end,
                                GLsizei count, GLenum type,
                                const GLvoid *indices)
{
   draw_elements(mode, count, type, indices, 1, 0, 0, true, start, end, true);
}
1181
/* glDrawElementsInstanced: basevertex = 0, baseinstance = 0, bounds unknown. */
void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedARB(GLenum mode, GLsizei count, GLenum type,
                                       const GLvoid *indices, GLsizei instance_count)
{
   draw_elements(mode, count, type, indices, instance_count, 0, 0, false, 0, 0, false);
}
1188
/* glDrawElementsBaseVertex: single draw with a basevertex offset. */
void GLAPIENTRY
_mesa_marshal_DrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type,
                                     const GLvoid *indices, GLint basevertex)
{
   draw_elements(mode, count, type, indices, 1, basevertex, 0, false, 0, 0, true);
}
1195
/* glDrawRangeElementsBaseVertex: caller-supplied index bounds [start, end]
 * plus a basevertex offset. */
void GLAPIENTRY
_mesa_marshal_DrawRangeElementsBaseVertex(GLenum mode, GLuint start, GLuint end,
                                          GLsizei count, GLenum type,
                                          const GLvoid *indices, GLint basevertex)
{
   draw_elements(mode, count, type, indices, 1, basevertex, 0, true, start, end, false);
}
1203
/* glDrawElementsInstancedBaseVertex: instanced draw with basevertex,
 * baseinstance = 0. */
void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseVertex(GLenum mode, GLsizei count,
                                              GLenum type, const GLvoid *indices,
                                              GLsizei instance_count, GLint basevertex)
{
   draw_elements(mode, count, type, indices, instance_count, basevertex, 0, false, 0, 0, false);
}
1211
/* glDrawElementsInstancedBaseInstance: instanced draw with baseinstance,
 * basevertex = 0. */
void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseInstance(GLenum mode, GLsizei count,
                                                GLenum type, const GLvoid *indices,
                                                GLsizei instance_count, GLuint baseinstance)
{
   draw_elements(mode, count, type, indices, instance_count, 0, baseinstance, false, 0, 0, false);
}
1219
/* glDrawElementsInstancedBaseVertexBaseInstance: the fully general
 * indexed-draw form; all other DrawElements* wrappers are subsets of it. */
void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count,
                                                          GLenum type, const GLvoid *indices,
                                                          GLsizei instance_count, GLint basevertex,
                                                          GLuint baseinstance)
{
   draw_elements(mode, count, type, indices, instance_count, basevertex, baseinstance, false, 0, 0, false);
}
1228
/* glMultiDrawElements: forwarded to the BaseVertex variant with
 * basevertex == NULL (no basevertex array is marshalled). */
void GLAPIENTRY
_mesa_marshal_MultiDrawElementsEXT(GLenum mode, const GLsizei *count,
                                   GLenum type, const GLvoid *const *indices,
                                   GLsizei draw_count)
{
   _mesa_marshal_MultiDrawElementsBaseVertex(mode, count, type, indices,
                                             draw_count, NULL);
}
1237
/* Stub: glthread marshals this call as DrawArraysInstancedBaseInstance,
 * so this command is never enqueued. */
uint32_t
_mesa_unmarshal_DrawArraysInstancedARB(struct gl_context *ctx, const struct marshal_cmd_DrawArraysInstancedARB *cmd, const uint64_t *last)
{
   unreachable("never used - DrawArraysInstancedBaseInstance is used instead");
   return 0;
}
1244
/* Stub: glthread marshals this call as
 * DrawElementsInstancedBaseVertexBaseInstance, so it is never enqueued. */
uint32_t
_mesa_unmarshal_DrawElements(struct gl_context *ctx, const struct marshal_cmd_DrawElements *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}
1251
/* Stub: glthread marshals this call as
 * DrawElementsInstancedBaseVertexBaseInstance, so it is never enqueued. */
uint32_t
_mesa_unmarshal_DrawRangeElements(struct gl_context *ctx, const struct marshal_cmd_DrawRangeElements *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}
1258
/* Stub: glthread marshals this call as
 * DrawElementsInstancedBaseVertexBaseInstance, so it is never enqueued. */
uint32_t
_mesa_unmarshal_DrawElementsBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsBaseVertex *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}
1265
/* Stub: glthread marshals this call as
 * DrawElementsInstancedBaseVertexBaseInstance, so it is never enqueued. */
uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseVertex *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}
1272
/* Stub: glthread marshals this call as
 * DrawElementsInstancedBaseVertexBaseInstance, so it is never enqueued. */
uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseInstance(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseInstance *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}
1279
/* Stub: glthread marshals this call as MultiDrawElementsBaseVertex,
 * so it is never enqueued. */
uint32_t
_mesa_unmarshal_MultiDrawElementsEXT(struct gl_context *ctx, const struct marshal_cmd_MultiDrawElementsEXT *cmd, const uint64_t *last)
{
   unreachable("never used - MultiDrawElementsBaseVertex is used instead");
   return 0;
}
1286