1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "util/compiler.h"
31
32 #include "glxclient.h"
33 #include "indirect.h"
34 #include <GL/glxproto.h>
35 #include "glxextensions.h"
36 #include "indirect_vertex_array.h"
37 #include "indirect_vertex_array_priv.h"
38
39 #define __GLX_PAD(n) (((n)+3) & ~3)
40
/**
 * \file indirect_vertex_array.c
 * Implement GLX protocol for vertex arrays and vertex buffer objects.
 *
 * The most important function in this file is \c fill_array_info_cache.
 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
 * in the DrawArrays protocol.  Certain operations, such as enabling or
 * disabling an array, can invalidate this cache.  \c fill_array_info_cache
 * fills-in this data.  Additionally, it examines the enabled state and
 * other factors to determine what "version" of DrawArrays protocol can be
 * used.
 *
 * Currently, only two versions of DrawArrays protocol are implemented.  The
 * first version is the "none" protocol.  This is the fallback when the
 * server does not support GL 1.1 / EXT_vertex_arrays.  It is implemented
 * by sending batches of immediate mode commands that are equivalent to the
 * DrawArrays protocol.
 *
 * The other protocol that is currently implemented is the "old" protocol.
 * This is the GL 1.1 DrawArrays protocol.  The only difference between GL
 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
 * This protocol is called "old" because the ARB is in the process of
 * defining a new protocol, which will probably be called either "new" or
 * "vbo", to support multiple texture coordinate arrays, generic attributes,
 * and vertex buffer objects.
 *
 * \author Ian Romanick <ian.d.romanick@intel.com>
 */
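
/*
 * Added illustration (not part of the original file): applications never see
 * the protocol selection directly.  Client code sets up arrays as usual, and
 * the first draw call after any enable/disable refreshes the ARRAY_INFO cache
 * and picks one of the emit paths below.  A minimal sketch, assuming a
 * current indirect-rendering context:
 *
 *    static const GLfloat verts[3][3] = {
 *       { 0.0f, 0.0f, 0.0f }, { 1.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f }
 *    };
 *
 *    glEnableClientState(GL_VERTEX_ARRAY);   // invalidates the cache
 *    glVertexPointer(3, GL_FLOAT, 0, verts); // fills in the array_state
 *    glDrawArrays(GL_TRIANGLES, 0, 3);       // fill_array_info_cache(), then
 *                                            // arrays->DrawArrays(...)
 */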
69
70 static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
71 static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
72
73 static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
74 const GLvoid * indices);
75 static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
76 const GLvoid * indices);
77
78
79 static GLubyte *emit_element_none(GLubyte * dst,
80 const struct array_state_vector *arrays,
81 unsigned index);
82 static GLubyte *emit_element_old(GLubyte * dst,
83 const struct array_state_vector *arrays,
84 unsigned index);
85 static struct array_state *get_array_entry(const struct array_state_vector
86 *arrays, GLenum key,
87 unsigned index);
88 static void fill_array_info_cache(struct array_state_vector *arrays);
89 static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
90 static GLboolean validate_count(struct glx_context * gc, GLsizei count);
91 static GLboolean validate_type(struct glx_context * gc, GLenum type);
92
93
/**
 * Table of sizes, in bytes, of the GL types.  All of the type enums are in
 * the range 0x1400 - 0x140F.  That range also covers types added by
 * extensions (e.g., \c GL_HALF_FLOAT_NV).  The elements of this table
 * correspond to the type enums masked with 0x0f.
 *
 * \note
 * \c GL_HALF_FLOAT_NV is not included (its entry is zero).  Neither are
 * \c GL_2_BYTES, \c GL_3_BYTES, or \c GL_4_BYTES.
 */
const GLuint __glXTypeSize_table[16] = {
   1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
};
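
/*
 * Worked example (added): GL_FLOAT is 0x1406, so 0x1406 & 0x0f == 6 and
 * __glXTypeSize_table[6] == 4 bytes; GL_UNSIGNED_SHORT is 0x1403, giving
 * __glXTypeSize_table[3] == 2 bytes.  The __glXTypeSize() macro used below is
 * assumed to perform exactly this masked lookup.
 */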
107
108
109 /**
110 * Free the per-context array state that was allocated with
111 * __glXInitVertexArrayState().
112 */
113 void
__glXFreeVertexArrayState(struct glx_context * gc)
115 {
116 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
117 struct array_state_vector *arrays = state->array_state;
118
119 if (arrays) {
120 free(arrays->stack);
121 arrays->stack = NULL;
122 free(arrays->arrays);
123 arrays->arrays = NULL;
124 free(arrays);
125 state->array_state = NULL;
126 }
127 }
128
129
/**
 * Initialize vertex array state of a GLX context.
 *
 * \param gc GLX context whose vertex array state is to be initialized.
 *
 * \warning
 * This function may only be called after struct glx_context::gl_extension_bits,
 * struct glx_context::server_minor, and struct glx_context::server_major have
 * been initialized.  These values are used to determine what vertex arrays
 * are supported.
 */
141 void
__glXInitVertexArrayState(struct glx_context * gc)
143 {
144 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
145 struct array_state_vector *arrays;
146
147 unsigned array_count;
148 int texture_units = 1, vertex_program_attribs = 0;
149 unsigned i, j;
150
151 GLboolean got_fog = GL_FALSE;
152 GLboolean got_secondary_color = GL_FALSE;
153
154
155 arrays = calloc(1, sizeof(struct array_state_vector));
156 state->array_state = arrays;
157
158 if (arrays == NULL) {
159 __glXSetError(gc, GL_OUT_OF_MEMORY);
160 return;
161 }
162
163 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
164 arrays->new_DrawArrays_possible = GL_FALSE;
165 arrays->DrawArrays = NULL;
166
167 arrays->active_texture_unit = 0;
168
169
   /* Determine how many arrays are actually needed.  Only arrays that
    * are supported by the server are created.  For example, if the server
    * supports only 2 texture units, then only 2 texture coordinate arrays
    * are created.
    *
    * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
    * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
    * GL_EDGE_FLAG_ARRAY are supported.
    */
179
180 array_count = 5;
181
182 if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
183 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
184 got_fog = GL_TRUE;
185 array_count++;
186 }
187
188 if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
189 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
190 got_secondary_color = GL_TRUE;
191 array_count++;
192 }
193
194 if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
195 || (gc->server_major > 1) || (gc->server_minor >= 3)) {
196 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
197 }
198
199 if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
200 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
201 GL_MAX_PROGRAM_ATTRIBS_ARB,
202 &vertex_program_attribs);
203 }
204
205 arrays->num_texture_units = texture_units;
206 arrays->num_vertex_program_attribs = vertex_program_attribs;
207 array_count += texture_units + vertex_program_attribs;
208 arrays->num_arrays = array_count;
209 arrays->arrays = calloc(array_count, sizeof(struct array_state));
210
211 if (arrays->arrays == NULL) {
212 state->array_state = NULL;
213 free(arrays);
214 __glXSetError(gc, GL_OUT_OF_MEMORY);
215 return;
216 }
217
218 arrays->arrays[0].data_type = GL_FLOAT;
219 arrays->arrays[0].count = 3;
220 arrays->arrays[0].key = GL_NORMAL_ARRAY;
221 arrays->arrays[0].normalized = GL_TRUE;
222 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
223
224 arrays->arrays[1].data_type = GL_FLOAT;
225 arrays->arrays[1].count = 4;
226 arrays->arrays[1].key = GL_COLOR_ARRAY;
227 arrays->arrays[1].normalized = GL_TRUE;
228 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
229
230 arrays->arrays[2].data_type = GL_FLOAT;
231 arrays->arrays[2].count = 1;
232 arrays->arrays[2].key = GL_INDEX_ARRAY;
233 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
234
235 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
236 arrays->arrays[3].count = 1;
237 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
238 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
239
240 for (i = 0; i < texture_units; i++) {
241 arrays->arrays[4 + i].data_type = GL_FLOAT;
242 arrays->arrays[4 + i].count = 4;
243 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
244
245 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
246 arrays->arrays[4 + i].index = i;
247 }
248
249 i = 4 + texture_units;
250
251 if (got_fog) {
252 arrays->arrays[i].data_type = GL_FLOAT;
253 arrays->arrays[i].count = 1;
254 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
255 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
256 i++;
257 }
258
259 if (got_secondary_color) {
260 arrays->arrays[i].data_type = GL_FLOAT;
261 arrays->arrays[i].count = 3;
262 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
263 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
264 arrays->arrays[i].normalized = GL_TRUE;
265 i++;
266 }
267
268
269 for (j = 0; j < vertex_program_attribs; j++) {
270 const unsigned idx = (vertex_program_attribs - (j + 1));
271
272
273 arrays->arrays[idx + i].data_type = GL_FLOAT;
274 arrays->arrays[idx + i].count = 4;
275 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
276
277 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
278 arrays->arrays[idx + i].index = idx;
279 }
280
281 i += vertex_program_attribs;
282
283
   /* The vertex array *must* be last because of the way that
    * emit_DrawArrays_none works: in immediate mode it is the Vertex command
    * that actually provokes the vertex, so it has to be emitted after all of
    * the other per-vertex data.
    */
287
288 arrays->arrays[i].data_type = GL_FLOAT;
289 arrays->arrays[i].count = 4;
290 arrays->arrays[i].key = GL_VERTEX_ARRAY;
291 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
292
293 assert((i + 1) == arrays->num_arrays);
294
295 arrays->stack_index = 0;
296 arrays->stack = malloc(sizeof(struct array_stack_state)
297 * arrays->num_arrays
298 * __GL_CLIENT_ATTRIB_STACK_DEPTH);
299
300 if (arrays->stack == NULL) {
301 state->array_state = NULL;
302 free(arrays->arrays);
303 free(arrays);
304 __glXSetError(gc, GL_OUT_OF_MEMORY);
305 return;
306 }
307 }
308
309
310 /**
311 * Calculate the size of a single vertex for the "none" protocol. This is
312 * essentially the size of all the immediate-mode commands required to
313 * implement the enabled vertex arrays.
314 */
315 static size_t
calculate_single_vertex_size_none(const struct array_state_vector *arrays)
317 {
318 size_t single_vertex_size = 0;
319 unsigned i;
320
321
322 for (i = 0; i < arrays->num_arrays; i++) {
323 if (arrays->arrays[i].enabled) {
324 single_vertex_size += arrays->arrays[i].header[0];
325 }
326 }
327
328 return single_vertex_size;
329 }
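
/*
 * Worked example (added): with only GL_VERTEX_ARRAY enabled as 3 floats,
 * __indirect_glVertexPointer() set header[0] = __GLX_PAD(4 + 3 * 4) == 16,
 * so each "none"-protocol vertex costs 16 bytes (a 4 byte Vertex3fv render
 * header plus 12 bytes of data).  Also enabling a 3-float GL_NORMAL_ARRAY
 * would add another 16 bytes per vertex.
 */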
330
331
332 /**
333 * Emit a single element using non-DrawArrays protocol.
334 */
335 GLubyte *
emit_element_none(GLubyte * dst,
337 const struct array_state_vector * arrays, unsigned index)
338 {
339 unsigned i;
340
341
342 for (i = 0; i < arrays->num_arrays; i++) {
343 if (arrays->arrays[i].enabled) {
344 const size_t offset = index * arrays->arrays[i].true_stride;
345
346 /* The generic attributes can have more data than is in the
347 * elements. This is because a vertex array can be a 2 element,
348 * normalized, unsigned short, but the "closest" immediate mode
349 * protocol is for a 4Nus. Since the sizes are small, the
350 * performance impact on modern processors should be negligible.
351 */
352 (void) memset(dst, 0, arrays->arrays[i].header[0]);
353
354 (void) memcpy(dst, arrays->arrays[i].header, 4);
355
356 dst += 4;
357
358 if (arrays->arrays[i].key == GL_TEXTURE_COORD_ARRAY &&
359 arrays->arrays[i].index > 0) {
360 /* Multi-texture coordinate arrays require the texture target
361 * to be sent. For doubles it is after the data, for everything
362 * else it is before.
363 */
364 GLenum texture = arrays->arrays[i].index + GL_TEXTURE0;
365 if (arrays->arrays[i].data_type == GL_DOUBLE) {
366 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
367 arrays->arrays[i].element_size);
368 dst += arrays->arrays[i].element_size;
369 (void) memcpy(dst, &texture, 4);
370 dst += 4;
371 } else {
372 (void) memcpy(dst, &texture, 4);
373 dst += 4;
374 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
375 arrays->arrays[i].element_size);
376 dst += __GLX_PAD(arrays->arrays[i].element_size);
377 }
378 } else if (arrays->arrays[i].key == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
379 /* Vertex attribute data requires the index sent first.
380 */
381 (void) memcpy(dst, &arrays->arrays[i].index, 4);
382 dst += 4;
383 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
384 arrays->arrays[i].element_size);
385 dst += __GLX_PAD(arrays->arrays[i].element_size);
386 } else {
387 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
388 arrays->arrays[i].element_size);
389 dst += __GLX_PAD(arrays->arrays[i].element_size);
390 }
391 }
392 }
393
394 return dst;
395 }
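
/*
 * Added illustration of the "none" element layout: for a 2-float
 * GL_TEXTURE_COORD_ARRAY on unit 1, __indirect_glTexCoordPointer() uses an
 * 8 byte header size, so header[0] == __GLX_PAD(8 + 8) == 16 and each
 * element is emitted as
 *
 *    [ length | X_GLrop_MultiTexCoord2fvARB ]   4 bytes  (header)
 *    [ GL_TEXTURE1 ]                            4 bytes  (target, non-double)
 *    [ s | t ]                                  8 bytes  (array data)
 */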
396
397
398 /**
399 * Emit a single element using "old" DrawArrays protocol from
400 * EXT_vertex_arrays / OpenGL 1.1.
401 */
402 GLubyte *
emit_element_old(GLubyte * dst,
404 const struct array_state_vector * arrays, unsigned index)
405 {
406 unsigned i;
407
408
409 for (i = 0; i < arrays->num_arrays; i++) {
410 if (arrays->arrays[i].enabled) {
411 const size_t offset = index * arrays->arrays[i].true_stride;
412
413 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
414 arrays->arrays[i].element_size);
415
416 dst += __GLX_PAD(arrays->arrays[i].element_size);
417 }
418 }
419
420 return dst;
421 }
422
423
424 struct array_state *
get_array_entry(const struct array_state_vector *arrays,
426 GLenum key, unsigned index)
427 {
428 unsigned i;
429
430 for (i = 0; i < arrays->num_arrays; i++) {
431 if ((arrays->arrays[i].key == key)
432 && (arrays->arrays[i].index == index)) {
433 return &arrays->arrays[i];
434 }
435 }
436
437 return NULL;
438 }
439
440
441 static GLboolean
allocate_array_info_cache(struct array_state_vector *arrays,
443 size_t required_size)
444 {
445 #define MAX_HEADER_SIZE 20
446 if (arrays->array_info_cache_buffer_size < required_size) {
447 GLubyte *temp = realloc(arrays->array_info_cache_base,
448 required_size + MAX_HEADER_SIZE);
449
450 if (temp == NULL) {
451 return GL_FALSE;
452 }
453
454 arrays->array_info_cache_base = temp;
455 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
456 arrays->array_info_cache_buffer_size = required_size;
457 }
458
459 arrays->array_info_cache_size = required_size;
460 return GL_TRUE;
461 }
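
/*
 * Note (added): MAX_HEADER_SIZE reserves room in front of the cached
 * ARRAY_INFO data for the largest command header that
 * emit_DrawArrays_header_old() writes there -- a 16 byte DrawArrays header
 * plus the extra 4 byte size word used on the RenderLarge path.
 */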
462
463
/**
 * Fill in the ARRAY_INFO cache and select the DrawArrays / DrawElements
 * emit functions appropriate for the current array enable state.
 */
void
fill_array_info_cache(struct array_state_vector *arrays)
468 {
469 GLboolean old_DrawArrays_possible;
470 unsigned i;
471
472
473 /* Determine how many arrays are enabled.
474 */
475
476 arrays->enabled_client_array_count = 0;
477 old_DrawArrays_possible = arrays->old_DrawArrays_possible;
478 for (i = 0; i < arrays->num_arrays; i++) {
479 if (arrays->arrays[i].enabled) {
480 arrays->enabled_client_array_count++;
481 old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
482 }
483 }
484
485 if (arrays->new_DrawArrays_possible) {
486 assert(!arrays->new_DrawArrays_possible);
487 }
488 else if (old_DrawArrays_possible) {
489 const size_t required_size = arrays->enabled_client_array_count * 12;
490 uint32_t *info;
491
492
493 if (!allocate_array_info_cache(arrays, required_size)) {
494 return;
495 }
496
497
498 info = (uint32_t *) arrays->array_info_cache;
499 for (i = 0; i < arrays->num_arrays; i++) {
500 if (arrays->arrays[i].enabled) {
501 *(info++) = arrays->arrays[i].data_type;
502 *(info++) = arrays->arrays[i].count;
503 *(info++) = arrays->arrays[i].key;
504 }
505 }
506
507 arrays->DrawArrays = emit_DrawArrays_old;
508 arrays->DrawElements = emit_DrawElements_old;
509 }
510 else {
511 arrays->DrawArrays = emit_DrawArrays_none;
512 arrays->DrawElements = emit_DrawElements_none;
513 }
514
515 arrays->array_info_cache_valid = GL_TRUE;
516 }
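
/*
 * Added illustration: each enabled array contributes one 12 byte ARRAY_INFO
 * record of the form { data_type, count, key }, in the order the arrays are
 * stored (so the vertex array's record comes last).  With a 4-ubyte color
 * array and a 3-float vertex array enabled the cache holds
 *
 *    { GL_UNSIGNED_BYTE, 4, GL_COLOR_ARRAY  }
 *    { GL_FLOAT,         3, GL_VERTEX_ARRAY }
 *
 * for a total of enabled_client_array_count * 12 == 24 bytes.
 */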
517
518
/**
 * Emit a \c glDrawArrays command using the "none" protocol.  That is,
 * emit immediate-mode commands that are equivalent to the requested
 * \c glDrawArrays command.  This is used with servers that don't support
 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
 * vertex state is enabled that is not compatible with that protocol.
 */
526 void
emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
528 {
529 struct glx_context *gc = __glXGetCurrentContext();
530 const __GLXattribute *state =
531 (const __GLXattribute *) (gc->client_state_private);
532 struct array_state_vector *arrays = state->array_state;
533
534 size_t single_vertex_size;
535 GLubyte *pc;
536 unsigned i;
537 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
538 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
539
540
541 single_vertex_size = calculate_single_vertex_size_none(arrays);
542
543 pc = gc->pc;
544
545 (void) memcpy(pc, begin_cmd, 4);
546 *(int *) (pc + 4) = mode;
547
548 pc += 8;
549
550 for (i = 0; i < count; i++) {
551 if ((pc + single_vertex_size) >= gc->bufEnd) {
552 pc = __glXFlushRenderBuffer(gc, pc);
553 }
554
555 pc = emit_element_none(pc, arrays, first + i);
556 }
557
558 if ((pc + 4) >= gc->bufEnd) {
559 pc = __glXFlushRenderBuffer(gc, pc);
560 }
561
562 (void) memcpy(pc, end_cmd, 4);
563 pc += 4;
564
565 gc->pc = pc;
566 if (gc->pc > gc->limit) {
567 (void) __glXFlushRenderBuffer(gc, gc->pc);
568 }
569 }
570
571
572 /**
573 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
574 * protocol.
575 *
576 * \param gc GLX context.
577 * \param arrays Array state.
578 * \param elements_per_request Location to store the number of elements that
579 * can fit in a single Render / RenderLarge
580 * command.
581 * \param total_request Total number of requests for a RenderLarge
582 * command. If a Render command is used, this
583 * will be zero.
584 * \param mode Drawing mode.
585 * \param count Number of vertices.
586 *
587 * \returns
588 * A pointer to the buffer for array data.
589 */
590 static GLubyte *
emit_DrawArrays_header_old(struct glx_context * gc,
592 struct array_state_vector *arrays,
593 size_t * elements_per_request,
594 unsigned int *total_requests,
595 GLenum mode, GLsizei count)
596 {
597 size_t command_size;
598 size_t single_vertex_size;
599 const unsigned header_size = 16;
600 unsigned i;
601 GLubyte *pc;
602
603
604 /* Determine the size of the whole command. This includes the header,
605 * the ARRAY_INFO data and the array data. Once this size is calculated,
606 * it will be known whether a Render or RenderLarge command is needed.
607 */
608
609 single_vertex_size = 0;
610 for (i = 0; i < arrays->num_arrays; i++) {
611 if (arrays->arrays[i].enabled) {
612 single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
613 }
614 }
615
616 command_size = arrays->array_info_cache_size + header_size
617 + (single_vertex_size * count);
618
619
620 /* Write the header for either a Render command or a RenderLarge
621 * command. After the header is written, write the ARRAY_INFO data.
622 */
623
624 if (command_size > gc->maxSmallRenderCommandSize) {
      /* maxSize is the maximum amount of data that can be stuffed into a
       * single packet.  sz_xGLXRenderReq is added because bufSize is the
       * maximum packet size minus sz_xGLXRenderReq.
       */
629 const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
630 - sz_xGLXRenderLargeReq;
631 unsigned vertex_requests;
632
633
      /* Calculate the number of data packets that will be required to send
       * the whole command.  To do this, the number of vertices that
       * will fit in a single buffer must be calculated.
       *
       * The important value here is elements_per_request.  This is the
       * number of complete array elements that will fit in a single
       * buffer.  There may be some wasted space at the end of the buffer,
       * but splitting elements across buffer boundaries would be painful.
       */
643
644 elements_per_request[0] = maxSize / single_vertex_size;
645
646 vertex_requests = (count + elements_per_request[0] - 1)
647 / elements_per_request[0];
648
649 *total_requests = vertex_requests + 1;
650
651
652 __glXFlushRenderBuffer(gc, gc->pc);
653
654 command_size += 4;
655
656 pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
657 *(uint32_t *) (pc + 0) = command_size;
658 *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
659 *(uint32_t *) (pc + 8) = count;
660 *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
661 *(uint32_t *) (pc + 16) = mode;
662
663 __glXSendLargeChunk(gc, 1, *total_requests, pc,
664 header_size + 4 + arrays->array_info_cache_size);
665
666 pc = gc->pc;
667 }
668 else {
669 if ((gc->pc + command_size) >= gc->bufEnd) {
670 (void) __glXFlushRenderBuffer(gc, gc->pc);
671 }
672
673 pc = gc->pc;
674 *(uint16_t *) (pc + 0) = command_size;
675 *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
676 *(uint32_t *) (pc + 4) = count;
677 *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
678 *(uint32_t *) (pc + 12) = mode;
679
680 pc += header_size;
681
682 (void) memcpy(pc, arrays->array_info_cache,
683 arrays->array_info_cache_size);
684 pc += arrays->array_info_cache_size;
685
686 *elements_per_request = count;
687 *total_requests = 0;
688 }
689
690
691 return pc;
692 }
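
/*
 * Worked example (added): with 3-float vertex and 3-float normal arrays
 * enabled, single_vertex_size is 12 + 12 == 24 bytes and the ARRAY_INFO
 * cache is 2 * 12 == 24 bytes, so a 100 vertex glDrawArrays needs
 * 24 + 16 + 24 * 100 == 2440 bytes.  If that exceeds
 * gc->maxSmallRenderCommandSize, the RenderLarge path above sends the header
 * plus ARRAY_INFO as chunk 1 and splits the vertex data across
 * ceil(count / elements_per_request) further chunks.
 */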
693
694
/**
 * Emit a \c glDrawArrays command using the GL 1.1 / EXT_vertex_arrays
 * ("old") DrawArrays protocol.
 */
void
emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
699 {
700 struct glx_context *gc = __glXGetCurrentContext();
701 const __GLXattribute *state =
702 (const __GLXattribute *) (gc->client_state_private);
703 struct array_state_vector *arrays = state->array_state;
704
705 GLubyte *pc;
706 size_t elements_per_request;
707 unsigned total_requests = 0;
708 unsigned i;
709
710
711 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
712 &total_requests, mode, count);
713
714
715 /* Write the arrays.
716 */
717
718 if (total_requests == 0) {
719 assert(elements_per_request >= count);
720
721 for (i = 0; i < count; i++) {
722 pc = emit_element_old(pc, arrays, i + first);
723 }
724
725 assert(pc <= gc->bufEnd);
726
727 gc->pc = pc;
728 if (gc->pc > gc->limit) {
729 (void) __glXFlushRenderBuffer(gc, gc->pc);
730 }
731 }
732 else {
733 unsigned req;
734
735
736 for (req = 2; req <= total_requests; req++) {
737 if (count < elements_per_request) {
738 elements_per_request = count;
739 }
740
741 pc = gc->pc;
742 for (i = 0; i < elements_per_request; i++) {
743 pc = emit_element_old(pc, arrays, i + first);
744 }
745
746 first += elements_per_request;
747
748 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
749
750 count -= elements_per_request;
751 }
752 }
753 }
754
755
756 void
emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
758 const GLvoid * indices)
759 {
760 struct glx_context *gc = __glXGetCurrentContext();
761 const __GLXattribute *state =
762 (const __GLXattribute *) (gc->client_state_private);
763 struct array_state_vector *arrays = state->array_state;
764 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
765 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
766
767 GLubyte *pc;
768 size_t single_vertex_size;
769 unsigned i;
770
771
772 single_vertex_size = calculate_single_vertex_size_none(arrays);
773
774
775 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
776 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
777 }
778
779 pc = gc->pc;
780
781 (void) memcpy(pc, begin_cmd, 4);
782 *(int *) (pc + 4) = mode;
783
784 pc += 8;
785
786 for (i = 0; i < count; i++) {
787 unsigned index = 0;
788
789 if ((pc + single_vertex_size) >= gc->bufEnd) {
790 pc = __glXFlushRenderBuffer(gc, pc);
791 }
792
793 switch (type) {
794 case GL_UNSIGNED_INT:
795 index = (unsigned) (((GLuint *) indices)[i]);
796 break;
797 case GL_UNSIGNED_SHORT:
798 index = (unsigned) (((GLushort *) indices)[i]);
799 break;
800 case GL_UNSIGNED_BYTE:
801 index = (unsigned) (((GLubyte *) indices)[i]);
802 break;
803 }
804 pc = emit_element_none(pc, arrays, index);
805 }
806
807 if ((pc + 4) >= gc->bufEnd) {
808 pc = __glXFlushRenderBuffer(gc, pc);
809 }
810
811 (void) memcpy(pc, end_cmd, 4);
812 pc += 4;
813
814 gc->pc = pc;
815 if (gc->pc > gc->limit) {
816 (void) __glXFlushRenderBuffer(gc, gc->pc);
817 }
818 }
819
820
/**
 * Emit a \c glDrawElements command by resolving each index on the client
 * side and sending the result using the "old" DrawArrays protocol.
 */
void
emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
825 const GLvoid * indices)
826 {
827 struct glx_context *gc = __glXGetCurrentContext();
828 const __GLXattribute *state =
829 (const __GLXattribute *) (gc->client_state_private);
830 struct array_state_vector *arrays = state->array_state;
831
832 GLubyte *pc;
833 size_t elements_per_request;
834 unsigned total_requests = 0;
835 unsigned i;
836 unsigned req;
837 unsigned req_element = 0;
838
839
840 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
841 &total_requests, mode, count);
842
843
844 /* Write the arrays.
845 */
846
847 req = 2;
848 while (count > 0) {
849 if (count < elements_per_request) {
850 elements_per_request = count;
851 }
852
853 switch (type) {
854 case GL_UNSIGNED_INT:{
855 const GLuint *ui_ptr = (const GLuint *) indices + req_element;
856
857 for (i = 0; i < elements_per_request; i++) {
858 const GLint index = (GLint) * (ui_ptr++);
859 pc = emit_element_old(pc, arrays, index);
860 }
861 break;
862 }
863 case GL_UNSIGNED_SHORT:{
864 const GLushort *us_ptr = (const GLushort *) indices + req_element;
865
866 for (i = 0; i < elements_per_request; i++) {
867 const GLint index = (GLint) * (us_ptr++);
868 pc = emit_element_old(pc, arrays, index);
869 }
870 break;
871 }
872 case GL_UNSIGNED_BYTE:{
873 const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;
874
875 for (i = 0; i < elements_per_request; i++) {
876 const GLint index = (GLint) * (ub_ptr++);
877 pc = emit_element_old(pc, arrays, index);
878 }
879 break;
880 }
881 }
882
883 if (total_requests != 0) {
884 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
885 pc = gc->pc;
886 req++;
887 }
888
889 count -= elements_per_request;
890 req_element += elements_per_request;
891 }
892
893
894 assert((total_requests == 0) || ((req - 1) == total_requests));
895
896 if (total_requests == 0) {
897 assert(pc <= gc->bufEnd);
898
899 gc->pc = pc;
900 if (gc->pc > gc->limit) {
901 (void) __glXFlushRenderBuffer(gc, gc->pc);
902 }
903 }
904 }
905
906
/**
 * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
 * If it is not valid, then an error code is set in the GLX context.
 *
 * \returns
 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
 */
914 static GLboolean
validate_mode(struct glx_context * gc, GLenum mode)
916 {
917 switch (mode) {
918 case GL_POINTS:
919 case GL_LINE_STRIP:
920 case GL_LINE_LOOP:
921 case GL_LINES:
922 case GL_TRIANGLE_STRIP:
923 case GL_TRIANGLE_FAN:
924 case GL_TRIANGLES:
925 case GL_QUAD_STRIP:
926 case GL_QUADS:
927 case GL_POLYGON:
928 break;
929 default:
930 __glXSetError(gc, GL_INVALID_ENUM);
931 return GL_FALSE;
932 }
933
934 return GL_TRUE;
935 }
936
937
/**
 * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
 * being set.  A value of zero will not result in an error being set, but
 * will result in \c GL_FALSE being returned.
 *
 * \returns
 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
 */
947 static GLboolean
validate_count(struct glx_context * gc, GLsizei count)
949 {
950 if (count < 0) {
951 __glXSetError(gc, GL_INVALID_VALUE);
952 }
953
954 return (count > 0);
955 }
956
957
/**
 * Validate that the \c type parameter to \c glDrawElements, et al. is
 * valid.  Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
 * \c GL_UNSIGNED_INT are valid.
 *
 * \returns
 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
 */
966 static GLboolean
validate_type(struct glx_context * gc, GLenum type)
968 {
969 switch (type) {
970 case GL_UNSIGNED_INT:
971 case GL_UNSIGNED_SHORT:
972 case GL_UNSIGNED_BYTE:
973 return GL_TRUE;
974 default:
975 __glXSetError(gc, GL_INVALID_ENUM);
976 return GL_FALSE;
977 }
978 }
979
980
981 void
__indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
983 {
984 struct glx_context *gc = __glXGetCurrentContext();
985 const __GLXattribute *state =
986 (const __GLXattribute *) (gc->client_state_private);
987 struct array_state_vector *arrays = state->array_state;
988
989
990 if (validate_mode(gc, mode) && validate_count(gc, count)) {
991 if (!arrays->array_info_cache_valid) {
992 fill_array_info_cache(arrays);
993 }
994
995 arrays->DrawArrays(mode, first, count);
996 }
997 }
998
999
1000 void
__indirect_glArrayElement(GLint index)
1002 {
1003 struct glx_context *gc = __glXGetCurrentContext();
1004 const __GLXattribute *state =
1005 (const __GLXattribute *) (gc->client_state_private);
1006 struct array_state_vector *arrays = state->array_state;
1007
1008 size_t single_vertex_size;
1009
1010
1011 single_vertex_size = calculate_single_vertex_size_none(arrays);
1012
1013 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
1014 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
1015 }
1016
1017 gc->pc = emit_element_none(gc->pc, arrays, index);
1018
1019 if (gc->pc > gc->limit) {
1020 (void) __glXFlushRenderBuffer(gc, gc->pc);
1021 }
1022 }
1023
1024
1025 void
__indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
1027 const GLvoid * indices)
1028 {
1029 struct glx_context *gc = __glXGetCurrentContext();
1030 const __GLXattribute *state =
1031 (const __GLXattribute *) (gc->client_state_private);
1032 struct array_state_vector *arrays = state->array_state;
1033
1034
1035 if (validate_mode(gc, mode) && validate_count(gc, count)
1036 && validate_type(gc, type)) {
1037 if (!arrays->array_info_cache_valid) {
1038 fill_array_info_cache(arrays);
1039 }
1040
1041 arrays->DrawElements(mode, count, type, indices);
1042 }
1043 }
1044
1045
1046 void
__indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
1048 GLsizei count, GLenum type,
1049 const GLvoid * indices)
1050 {
1051 struct glx_context *gc = __glXGetCurrentContext();
1052 const __GLXattribute *state =
1053 (const __GLXattribute *) (gc->client_state_private);
1054 struct array_state_vector *arrays = state->array_state;
1055
1056
1057 if (validate_mode(gc, mode) && validate_count(gc, count)
1058 && validate_type(gc, type)) {
1059 if (end < start) {
1060 __glXSetError(gc, GL_INVALID_VALUE);
1061 return;
1062 }
1063
1064 if (!arrays->array_info_cache_valid) {
1065 fill_array_info_cache(arrays);
1066 }
1067
1068 arrays->DrawElements(mode, count, type, indices);
1069 }
1070 }
1071
1072
1073 void
__indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
1075 const GLsizei *count, GLsizei primcount)
1076 {
1077 struct glx_context *gc = __glXGetCurrentContext();
1078 const __GLXattribute *state =
1079 (const __GLXattribute *) (gc->client_state_private);
1080 struct array_state_vector *arrays = state->array_state;
1081 GLsizei i;
1082
1083
1084 if (validate_mode(gc, mode)) {
1085 if (!arrays->array_info_cache_valid) {
1086 fill_array_info_cache(arrays);
1087 }
1088
1089 for (i = 0; i < primcount; i++) {
1090 if (validate_count(gc, count[i])) {
1091 arrays->DrawArrays(mode, first[i], count[i]);
1092 }
1093 }
1094 }
1095 }
1096
1097
1098 void
__indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
1100 GLenum type, const GLvoid * const * indices,
1101 GLsizei primcount)
1102 {
1103 struct glx_context *gc = __glXGetCurrentContext();
1104 const __GLXattribute *state =
1105 (const __GLXattribute *) (gc->client_state_private);
1106 struct array_state_vector *arrays = state->array_state;
1107 GLsizei i;
1108
1109
1110 if (validate_mode(gc, mode) && validate_type(gc, type)) {
1111 if (!arrays->array_info_cache_valid) {
1112 fill_array_info_cache(arrays);
1113 }
1114
1115 for (i = 0; i < primcount; i++) {
1116 if (validate_count(gc, count[i])) {
1117 arrays->DrawElements(mode, count[i], type, indices[i]);
1118 }
1119 }
1120 }
1121 }
1122
1123
1124 /* The HDR_SIZE macro argument is the command header size (4 bytes)
1125 * plus any additional index word e.g. for texture units or vertex
1126 * attributes.
1127 */
1128 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
1129 do { \
1130 (a)->data = PTR; \
1131 (a)->data_type = TYPE; \
1132 (a)->user_stride = STRIDE; \
1133 (a)->count = COUNT; \
1134 (a)->normalized = NORMALIZED; \
1135 \
1136 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
1137 (a)->true_stride = (STRIDE == 0) \
1138 ? (a)->element_size : STRIDE; \
1139 \
1140 (a)->header[0] = __GLX_PAD(HDR_SIZE + (a)->element_size); \
1141 (a)->header[1] = OPCODE; \
1142 } while(0)
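
/*
 * Worked example (added): __indirect_glVertexPointer(3, GL_FLOAT, 0, ptr)
 * below ends up with
 *
 *    a->element_size = 4 * 3;              // 12 bytes of data per element
 *    a->true_stride  = 12;                 // stride 0 means tightly packed
 *    a->header[0]    = __GLX_PAD(4 + 12);  // 16 byte immediate-mode command
 *    a->header[1]    = X_GLrop_Vertex3fv;
 *
 * __indirect_glTexCoordPointer() on a non-zero unit and
 * __indirect_glVertexAttribPointer() pass an 8 byte HDR_SIZE instead because
 * their immediate-mode commands also carry a texture target or attribute
 * index word.
 */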
1143
1144
1145 void
__indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
1147 const GLvoid * pointer)
1148 {
1149 static const uint16_t short_ops[5] = {
1150 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1151 };
1152 static const uint16_t int_ops[5] = {
1153 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1154 };
1155 static const uint16_t float_ops[5] = {
1156 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1157 };
1158 static const uint16_t double_ops[5] = {
1159 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1160 };
1161 uint16_t opcode;
1162 struct glx_context *gc = __glXGetCurrentContext();
1163 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1164 struct array_state_vector *arrays = state->array_state;
1165 struct array_state *a;
1166
1167
1168 if (size < 2 || size > 4 || stride < 0) {
1169 __glXSetError(gc, GL_INVALID_VALUE);
1170 return;
1171 }
1172
1173 switch (type) {
1174 case GL_SHORT:
1175 opcode = short_ops[size];
1176 break;
1177 case GL_INT:
1178 opcode = int_ops[size];
1179 break;
1180 case GL_FLOAT:
1181 opcode = float_ops[size];
1182 break;
1183 case GL_DOUBLE:
1184 opcode = double_ops[size];
1185 break;
1186 default:
1187 __glXSetError(gc, GL_INVALID_ENUM);
1188 return;
1189 }
1190
1191 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
1192 assert(a != NULL);
1193 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
1194 opcode);
1195
1196 if (a->enabled) {
1197 arrays->array_info_cache_valid = GL_FALSE;
1198 }
1199 }
1200
1201
1202 void
__indirect_glNormalPointer(GLenum type, GLsizei stride,
1204 const GLvoid * pointer)
1205 {
1206 uint16_t opcode;
1207 struct glx_context *gc = __glXGetCurrentContext();
1208 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1209 struct array_state_vector *arrays = state->array_state;
1210 struct array_state *a;
1211
1212
1213 if (stride < 0) {
1214 __glXSetError(gc, GL_INVALID_VALUE);
1215 return;
1216 }
1217
1218 switch (type) {
1219 case GL_BYTE:
1220 opcode = X_GLrop_Normal3bv;
1221 break;
1222 case GL_SHORT:
1223 opcode = X_GLrop_Normal3sv;
1224 break;
1225 case GL_INT:
1226 opcode = X_GLrop_Normal3iv;
1227 break;
1228 case GL_FLOAT:
1229 opcode = X_GLrop_Normal3fv;
1230 break;
1231 case GL_DOUBLE:
1232 opcode = X_GLrop_Normal3dv;
1233 break;
1234 default:
1235 __glXSetError(gc, GL_INVALID_ENUM);
1236 return;
1237 }
1238
1239 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
1240 assert(a != NULL);
1241 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
1242
1243 if (a->enabled) {
1244 arrays->array_info_cache_valid = GL_FALSE;
1245 }
1246 }
1247
1248
1249 void
__indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
1251 const GLvoid * pointer)
1252 {
1253 static const uint16_t byte_ops[5] = {
1254 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1255 };
1256 static const uint16_t ubyte_ops[5] = {
1257 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1258 };
1259 static const uint16_t short_ops[5] = {
1260 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1261 };
1262 static const uint16_t ushort_ops[5] = {
1263 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1264 };
1265 static const uint16_t int_ops[5] = {
1266 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1267 };
1268 static const uint16_t uint_ops[5] = {
1269 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1270 };
1271 static const uint16_t float_ops[5] = {
1272 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1273 };
1274 static const uint16_t double_ops[5] = {
1275 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1276 };
1277 uint16_t opcode;
1278 struct glx_context *gc = __glXGetCurrentContext();
1279 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1280 struct array_state_vector *arrays = state->array_state;
1281 struct array_state *a;
1282
1283
1284 if (size < 3 || size > 4 || stride < 0) {
1285 __glXSetError(gc, GL_INVALID_VALUE);
1286 return;
1287 }
1288
1289 switch (type) {
1290 case GL_BYTE:
1291 opcode = byte_ops[size];
1292 break;
1293 case GL_UNSIGNED_BYTE:
1294 opcode = ubyte_ops[size];
1295 break;
1296 case GL_SHORT:
1297 opcode = short_ops[size];
1298 break;
1299 case GL_UNSIGNED_SHORT:
1300 opcode = ushort_ops[size];
1301 break;
1302 case GL_INT:
1303 opcode = int_ops[size];
1304 break;
1305 case GL_UNSIGNED_INT:
1306 opcode = uint_ops[size];
1307 break;
1308 case GL_FLOAT:
1309 opcode = float_ops[size];
1310 break;
1311 case GL_DOUBLE:
1312 opcode = double_ops[size];
1313 break;
1314 default:
1315 __glXSetError(gc, GL_INVALID_ENUM);
1316 return;
1317 }
1318
1319 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
1320 assert(a != NULL);
1321 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1322
1323 if (a->enabled) {
1324 arrays->array_info_cache_valid = GL_FALSE;
1325 }
1326 }
1327
1328
1329 void
__indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
1331 {
1332 uint16_t opcode;
1333 struct glx_context *gc = __glXGetCurrentContext();
1334 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1335 struct array_state_vector *arrays = state->array_state;
1336 struct array_state *a;
1337
1338
1339 if (stride < 0) {
1340 __glXSetError(gc, GL_INVALID_VALUE);
1341 return;
1342 }
1343
1344 switch (type) {
1345 case GL_UNSIGNED_BYTE:
1346 opcode = X_GLrop_Indexubv;
1347 break;
1348 case GL_SHORT:
1349 opcode = X_GLrop_Indexsv;
1350 break;
1351 case GL_INT:
1352 opcode = X_GLrop_Indexiv;
1353 break;
1354 case GL_FLOAT:
1355 opcode = X_GLrop_Indexfv;
1356 break;
1357 case GL_DOUBLE:
1358 opcode = X_GLrop_Indexdv;
1359 break;
1360 default:
1361 __glXSetError(gc, GL_INVALID_ENUM);
1362 return;
1363 }
1364
1365 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
1366 assert(a != NULL);
1367 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1368
1369 if (a->enabled) {
1370 arrays->array_info_cache_valid = GL_FALSE;
1371 }
1372 }
1373
1374
1375 void
__indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
1377 {
1378 struct glx_context *gc = __glXGetCurrentContext();
1379 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1380 struct array_state_vector *arrays = state->array_state;
1381 struct array_state *a;
1382
1383
1384 if (stride < 0) {
1385 __glXSetError(gc, GL_INVALID_VALUE);
1386 return;
1387 }
1388
1389
1390 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
1391 assert(a != NULL);
1392 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1393 4, X_GLrop_EdgeFlagv);
1394
1395 if (a->enabled) {
1396 arrays->array_info_cache_valid = GL_FALSE;
1397 }
1398 }
1399
1400
1401 void
__indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
1403 const GLvoid * pointer)
1404 {
1405 static const uint16_t short_ops[5] = {
1406 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
1407 X_GLrop_TexCoord4sv
1408 };
1409 static const uint16_t int_ops[5] = {
1410 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
1411 X_GLrop_TexCoord4iv
1412 };
1413 static const uint16_t float_ops[5] = {
1414 0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
1415 X_GLrop_TexCoord4fv
1416 };
1417 static const uint16_t double_ops[5] = {
1418 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
1419 X_GLrop_TexCoord4dv
1420 };
1421
1422 static const uint16_t mshort_ops[5] = {
1423 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
1424 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1425 };
1426 static const uint16_t mint_ops[5] = {
1427 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
1428 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1429 };
1430 static const uint16_t mfloat_ops[5] = {
1431 0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
1432 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1433 };
1434 static const uint16_t mdouble_ops[5] = {
1435 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
1436 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1437 };
1438
1439 uint16_t opcode;
1440 struct glx_context *gc = __glXGetCurrentContext();
1441 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1442 struct array_state_vector *arrays = state->array_state;
1443 struct array_state *a;
1444 unsigned header_size;
1445 unsigned index;
1446
1447
1448 if (size < 1 || size > 4 || stride < 0) {
1449 __glXSetError(gc, GL_INVALID_VALUE);
1450 return;
1451 }
1452
1453 index = arrays->active_texture_unit;
1454 if (index == 0) {
1455 switch (type) {
1456 case GL_SHORT:
1457 opcode = short_ops[size];
1458 break;
1459 case GL_INT:
1460 opcode = int_ops[size];
1461 break;
1462 case GL_FLOAT:
1463 opcode = float_ops[size];
1464 break;
1465 case GL_DOUBLE:
1466 opcode = double_ops[size];
1467 break;
1468 default:
1469 __glXSetError(gc, GL_INVALID_ENUM);
1470 return;
1471 }
1472
1473 header_size = 4;
1474 }
1475 else {
1476 switch (type) {
1477 case GL_SHORT:
1478 opcode = mshort_ops[size];
1479 break;
1480 case GL_INT:
1481 opcode = mint_ops[size];
1482 break;
1483 case GL_FLOAT:
1484 opcode = mfloat_ops[size];
1485 break;
1486 case GL_DOUBLE:
1487 opcode = mdouble_ops[size];
1488 break;
1489 default:
1490 __glXSetError(gc, GL_INVALID_ENUM);
1491 return;
1492 }
1493
1494 header_size = 8;
1495 }
1496
1497 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
1498 assert(a != NULL);
1499 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
1500 header_size, opcode);
1501
1502 if (a->enabled) {
1503 arrays->array_info_cache_valid = GL_FALSE;
1504 }
1505 }
1506
1507
1508 void
__indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride,
1510 const GLvoid * pointer)
1511 {
1512 uint16_t opcode;
1513 struct glx_context *gc = __glXGetCurrentContext();
1514 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1515 struct array_state_vector *arrays = state->array_state;
1516 struct array_state *a;
1517
1518
1519 if (size != 3 || stride < 0) {
1520 __glXSetError(gc, GL_INVALID_VALUE);
1521 return;
1522 }
1523
1524 switch (type) {
1525 case GL_BYTE:
1526 opcode = 4126;
1527 break;
1528 case GL_UNSIGNED_BYTE:
1529 opcode = 4131;
1530 break;
1531 case GL_SHORT:
1532 opcode = 4127;
1533 break;
1534 case GL_UNSIGNED_SHORT:
1535 opcode = 4132;
1536 break;
1537 case GL_INT:
1538 opcode = 4128;
1539 break;
1540 case GL_UNSIGNED_INT:
1541 opcode = 4133;
1542 break;
1543 case GL_FLOAT:
1544 opcode = 4129;
1545 break;
1546 case GL_DOUBLE:
1547 opcode = 4130;
1548 break;
1549 default:
1550 __glXSetError(gc, GL_INVALID_ENUM);
1551 return;
1552 }
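   /* The numeric opcodes above are believed to be the GLX render opcodes
    * assigned by EXT_secondary_color for SecondaryColor3{b,s,i,f,d,ub,us,ui}v
    * (4126-4133); the symbolic X_GLrop_* names are simply not used here.
    */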
1553
1554 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
1555 if (a == NULL) {
1556 __glXSetError(gc, GL_INVALID_OPERATION);
1557 return;
1558 }
1559
1560 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1561
1562 if (a->enabled) {
1563 arrays->array_info_cache_valid = GL_FALSE;
1564 }
1565 }
1566
1567
1568 void
__indirect_glFogCoordPointer(GLenum type, GLsizei stride,
1570 const GLvoid * pointer)
1571 {
1572 uint16_t opcode;
1573 struct glx_context *gc = __glXGetCurrentContext();
1574 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1575 struct array_state_vector *arrays = state->array_state;
1576 struct array_state *a;
1577
1578
1579 if (stride < 0) {
1580 __glXSetError(gc, GL_INVALID_VALUE);
1581 return;
1582 }
1583
1584 switch (type) {
1585 case GL_FLOAT:
1586 opcode = 4124;
1587 break;
1588 case GL_DOUBLE:
1589 opcode = 4125;
1590 break;
1591 default:
1592 __glXSetError(gc, GL_INVALID_ENUM);
1593 return;
1594 }
1595
1596 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
1597 if (a == NULL) {
1598 __glXSetError(gc, GL_INVALID_OPERATION);
1599 return;
1600 }
1601
1602 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1603
1604 if (a->enabled) {
1605 arrays->array_info_cache_valid = GL_FALSE;
1606 }
1607 }
1608
1609
1610 void
__indirect_glVertexAttribPointer(GLuint index, GLint size,
1612 GLenum type, GLboolean normalized,
1613 GLsizei stride, const GLvoid * pointer)
1614 {
1615 static const uint16_t short_ops[5] = {
1616 0, X_GLrop_VertexAttrib1svARB, X_GLrop_VertexAttrib2svARB,
1617 X_GLrop_VertexAttrib3svARB, X_GLrop_VertexAttrib4svARB
1618 };
1619 static const uint16_t float_ops[5] = {
1620 0, X_GLrop_VertexAttrib1fvARB, X_GLrop_VertexAttrib2fvARB,
1621 X_GLrop_VertexAttrib3fvARB, X_GLrop_VertexAttrib4fvARB
1622 };
1623 static const uint16_t double_ops[5] = {
1624 0, X_GLrop_VertexAttrib1dvARB, X_GLrop_VertexAttrib2dvARB,
1625 X_GLrop_VertexAttrib3dvARB, X_GLrop_VertexAttrib4dvARB
1626 };
1627
1628 uint16_t opcode;
1629 struct glx_context *gc = __glXGetCurrentContext();
1630 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1631 struct array_state_vector *arrays = state->array_state;
1632 struct array_state *a;
1633 unsigned true_immediate_count;
1634 unsigned true_immediate_size;
1635
1636
1637 if ((size < 1) || (size > 4) || (stride < 0)
1638 || (index > arrays->num_vertex_program_attribs)) {
1639 __glXSetError(gc, GL_INVALID_VALUE);
1640 return;
1641 }
1642
1643 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1644 switch (type) {
1645 case GL_BYTE:
1646 opcode = X_GLrop_VertexAttrib4NbvARB;
1647 break;
1648 case GL_UNSIGNED_BYTE:
1649 opcode = X_GLrop_VertexAttrib4NubvARB;
1650 break;
1651 case GL_SHORT:
1652 opcode = X_GLrop_VertexAttrib4NsvARB;
1653 break;
1654 case GL_UNSIGNED_SHORT:
1655 opcode = X_GLrop_VertexAttrib4NusvARB;
1656 break;
1657 case GL_INT:
1658 opcode = X_GLrop_VertexAttrib4NivARB;
1659 break;
1660 case GL_UNSIGNED_INT:
1661 opcode = X_GLrop_VertexAttrib4NuivARB;
1662 break;
1663 default:
1664 __glXSetError(gc, GL_INVALID_ENUM);
1665 return;
1666 }
1667
1668 true_immediate_count = 4;
1669 }
1670 else {
1671 true_immediate_count = size;
1672
1673 switch (type) {
1674 case GL_BYTE:
1675 opcode = X_GLrop_VertexAttrib4bvARB;
1676 true_immediate_count = 4;
1677 break;
1678 case GL_UNSIGNED_BYTE:
1679 opcode = X_GLrop_VertexAttrib4ubvARB;
1680 true_immediate_count = 4;
1681 break;
1682 case GL_SHORT:
1683 opcode = short_ops[size];
1684 break;
1685 case GL_UNSIGNED_SHORT:
1686 opcode = X_GLrop_VertexAttrib4usvARB;
1687 true_immediate_count = 4;
1688 break;
1689 case GL_INT:
1690 opcode = X_GLrop_VertexAttrib4ivARB;
1691 true_immediate_count = 4;
1692 break;
1693 case GL_UNSIGNED_INT:
1694 opcode = X_GLrop_VertexAttrib4uivARB;
1695 true_immediate_count = 4;
1696 break;
1697 case GL_FLOAT:
1698 opcode = float_ops[size];
1699 break;
1700 case GL_DOUBLE:
1701 opcode = double_ops[size];
1702 break;
1703 default:
1704 __glXSetError(gc, GL_INVALID_ENUM);
1705 return;
1706 }
1707 }
1708
1709 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
1710 if (a == NULL) {
1711 __glXSetError(gc, GL_INVALID_OPERATION);
1712 return;
1713 }
1714
1715 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
1716 opcode);
1717
1718 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1719 a->header[0] = __GLX_PAD(8 + true_immediate_size);
1720
1721 if (a->enabled) {
1722 arrays->array_info_cache_valid = GL_FALSE;
1723 }
1724 }
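
/*
 * Note (added): for types whose immediate-mode VertexAttrib commands only
 * exist in 4-component (or 4N) form -- e.g. X_GLrop_VertexAttrib4ubvARB for
 * GL_UNSIGNED_BYTE -- true_immediate_count is forced to 4 above, and
 * header[0] is recomputed from the 8 byte header plus the 4-component size
 * rather than from the user-supplied size.
 */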
1725
1726
1727 /**
1728 * I don't have 100% confidence that this is correct. The different rules
1729 * about whether or not generic vertex attributes alias "classic" vertex
1730 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1731 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1732 * feeling is that the client-side doesn't have to worry about it. The
1733 * client just sends all the data to the server and lets the server deal
1734 * with it.
1735 */
1736 void
__indirect_glVertexAttribPointerNV(GLuint index, GLint size,
1738 GLenum type, GLsizei stride,
1739 const GLvoid * pointer)
1740 {
1741 struct glx_context *gc = __glXGetCurrentContext();
1742 GLboolean normalized = GL_FALSE;
1743
1744
1745 switch (type) {
1746 case GL_UNSIGNED_BYTE:
1747 if (size != 4) {
1748 __glXSetError(gc, GL_INVALID_VALUE);
1749 return;
1750 }
1751 normalized = GL_TRUE;
1752 FALLTHROUGH;
1753 case GL_SHORT:
1754 case GL_FLOAT:
1755 case GL_DOUBLE:
1756 __indirect_glVertexAttribPointer(index, size, type,
1757 normalized, stride, pointer);
1758 return;
1759 default:
1760 __glXSetError(gc, GL_INVALID_ENUM);
1761 return;
1762 }
1763 }
1764
1765
1766 void
__indirect_glClientActiveTexture(GLenum texture)
1768 {
1769 struct glx_context *const gc = __glXGetCurrentContext();
1770 __GLXattribute *const state =
1771 (__GLXattribute *) (gc->client_state_private);
1772 struct array_state_vector *const arrays = state->array_state;
1773 const GLint unit = (GLint) texture - GL_TEXTURE0;
1774
1775
1776 if ((unit < 0) || (unit >= arrays->num_texture_units)) {
1777 __glXSetError(gc, GL_INVALID_ENUM);
1778 return;
1779 }
1780
1781 arrays->active_texture_unit = unit;
1782 }
1783
1784
1785 /**
1786 * Modify the enable state for the selected array
1787 */
1788 GLboolean
__glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
1790 GLboolean enable)
1791 {
1792 struct array_state_vector *arrays = state->array_state;
1793 struct array_state *a;
1794
1795
   /* Texture coordinate arrays have an implicit index set when the
    * application calls glClientActiveTexture.
    */
1799 if (key == GL_TEXTURE_COORD_ARRAY) {
1800 index = arrays->active_texture_unit;
1801 }
1802
1803 a = get_array_entry(arrays, key, index);
1804
1805 if ((a != NULL) && (a->enabled != enable)) {
1806 a->enabled = enable;
1807 arrays->array_info_cache_valid = GL_FALSE;
1808 }
1809
1810 return (a != NULL);
1811 }
1812
1813
1814 void
__glXArrayDisableAll(__GLXattribute * state)
1816 {
1817 struct array_state_vector *arrays = state->array_state;
1818 unsigned i;
1819
1820
1821 for (i = 0; i < arrays->num_arrays; i++) {
1822 arrays->arrays[i].enabled = GL_FALSE;
1823 }
1824
1825 arrays->array_info_cache_valid = GL_FALSE;
1826 }
1827
1828
/**
 * Get the enable state of the selected array.
 */
GLboolean
__glXGetArrayEnable(const __GLXattribute * const state,
1833 GLenum key, unsigned index, GLintptr * dest)
1834 {
1835 const struct array_state_vector *arrays = state->array_state;
1836 const struct array_state *a =
1837 get_array_entry((struct array_state_vector *) arrays,
1838 key, index);
1839
1840 if (a != NULL) {
1841 *dest = (GLintptr) a->enabled;
1842 }
1843
1844 return (a != NULL);
1845 }
1846
1847
/**
 * Get the data type of the selected array.
 */
GLboolean
__glXGetArrayType(const __GLXattribute * const state,
1852 GLenum key, unsigned index, GLintptr * dest)
1853 {
1854 const struct array_state_vector *arrays = state->array_state;
1855 const struct array_state *a =
1856 get_array_entry((struct array_state_vector *) arrays,
1857 key, index);
1858
1859 if (a != NULL) {
1860 *dest = (GLintptr) a->data_type;
1861 }
1862
1863 return (a != NULL);
1864 }
1865
1866
/**
 * Get the element count (size) of the selected array.
 */
GLboolean
__glXGetArraySize(const __GLXattribute * const state,
1871 GLenum key, unsigned index, GLintptr * dest)
1872 {
1873 const struct array_state_vector *arrays = state->array_state;
1874 const struct array_state *a =
1875 get_array_entry((struct array_state_vector *) arrays,
1876 key, index);
1877
1878 if (a != NULL) {
1879 *dest = (GLintptr) a->count;
1880 }
1881
1882 return (a != NULL);
1883 }
1884
1885
/**
 * Get the user-specified stride of the selected array.
 */
GLboolean
__glXGetArrayStride(const __GLXattribute * const state,
1890 GLenum key, unsigned index, GLintptr * dest)
1891 {
1892 const struct array_state_vector *arrays = state->array_state;
1893 const struct array_state *a =
1894 get_array_entry((struct array_state_vector *) arrays,
1895 key, index);
1896
1897 if (a != NULL) {
1898 *dest = (GLintptr) a->user_stride;
1899 }
1900
1901 return (a != NULL);
1902 }
1903
1904
/**
 * Get the data pointer of the selected array.
 */
GLboolean
__glXGetArrayPointer(const __GLXattribute * const state,
1909 GLenum key, unsigned index, void **dest)
1910 {
1911 const struct array_state_vector *arrays = state->array_state;
1912 const struct array_state *a =
1913 get_array_entry((struct array_state_vector *) arrays,
1914 key, index);
1915
1916
1917 if (a != NULL) {
1918 *dest = (void *) (a->data);
1919 }
1920
1921 return (a != NULL);
1922 }
1923
1924
/**
 * Get the normalized flag of the selected array.
 */
GLboolean
__glXGetArrayNormalized(const __GLXattribute * const state,
1929 GLenum key, unsigned index, GLintptr * dest)
1930 {
1931 const struct array_state_vector *arrays = state->array_state;
1932 const struct array_state *a =
1933 get_array_entry((struct array_state_vector *) arrays,
1934 key, index);
1935
1936
1937 if (a != NULL) {
1938 *dest = (GLintptr) a->normalized;
1939 }
1940
1941 return (a != NULL);
1942 }
1943
1944
/**
 * Get the currently active client texture unit.
 */
GLuint
__glXGetActiveTextureUnit(const __GLXattribute * const state)
1949 {
1950 return state->array_state->active_texture_unit;
1951 }
1952
1953
1954 void
__glXPushArrayState(__GLXattribute * state)
1956 {
1957 struct array_state_vector *arrays = state->array_state;
1958 struct array_stack_state *stack =
1959 &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1960 unsigned i;
1961
1962 /* XXX are we pushing _all_ the necessary fields? */
1963 for (i = 0; i < arrays->num_arrays; i++) {
1964 stack[i].data = arrays->arrays[i].data;
1965 stack[i].data_type = arrays->arrays[i].data_type;
1966 stack[i].user_stride = arrays->arrays[i].user_stride;
1967 stack[i].count = arrays->arrays[i].count;
1968 stack[i].key = arrays->arrays[i].key;
1969 stack[i].index = arrays->arrays[i].index;
1970 stack[i].enabled = arrays->arrays[i].enabled;
1971 }
1972
1973 arrays->active_texture_unit_stack[arrays->stack_index] =
1974 arrays->active_texture_unit;
1975
1976 arrays->stack_index++;
1977 }
1978
1979
1980 void
__glXPopArrayState(__GLXattribute * state)
1982 {
1983 struct array_state_vector *arrays = state->array_state;
1984 struct array_stack_state *stack;
1985 unsigned i;
1986
1987
1988 arrays->stack_index--;
1989 stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1990
1991 for (i = 0; i < arrays->num_arrays; i++) {
1992 switch (stack[i].key) {
1993 case GL_NORMAL_ARRAY:
1994 __indirect_glNormalPointer(stack[i].data_type,
1995 stack[i].user_stride, stack[i].data);
1996 break;
1997 case GL_COLOR_ARRAY:
1998 __indirect_glColorPointer(stack[i].count,
1999 stack[i].data_type,
2000 stack[i].user_stride, stack[i].data);
2001 break;
2002 case GL_INDEX_ARRAY:
2003 __indirect_glIndexPointer(stack[i].data_type,
2004 stack[i].user_stride, stack[i].data);
2005 break;
2006 case GL_EDGE_FLAG_ARRAY:
2007 __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
2008 break;
2009 case GL_TEXTURE_COORD_ARRAY:
2010 arrays->active_texture_unit = stack[i].index;
2011 __indirect_glTexCoordPointer(stack[i].count,
2012 stack[i].data_type,
2013 stack[i].user_stride, stack[i].data);
2014 break;
2015 case GL_SECONDARY_COLOR_ARRAY:
2016 __indirect_glSecondaryColorPointer(stack[i].count,
2017 stack[i].data_type,
2018 stack[i].user_stride,
2019 stack[i].data);
2020 break;
2021 case GL_FOG_COORDINATE_ARRAY:
2022 __indirect_glFogCoordPointer(stack[i].data_type,
2023 stack[i].user_stride, stack[i].data);
2024 break;
2025
2026 }
2027
2028 __glXSetArrayEnable(state, stack[i].key, stack[i].index,
2029 stack[i].enabled);
2030 }
2031
2032 arrays->active_texture_unit =
2033 arrays->active_texture_unit_stack[arrays->stack_index];
2034 }
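
/*
 * Note (added): these push/pop helpers are presumably driven by the client
 * attribute stack (glPushClientAttrib / glPopClientAttrib with
 * GL_CLIENT_VERTEX_ARRAY_BIT).  The pop path simply replays the saved state
 * through the regular *Pointer entry points and __glXSetArrayEnable(), so the
 * ARRAY_INFO cache is invalidated as a side effect.
 */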
2035