/**************************************************************************

Copyright 2002-2008 VMware, Inc.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *    Keith Whitwell <keithw@vmware.com>
 */

#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/macros.h"
#include "main/vtxfmt.h"
#include "main/dlist.h"
#include "main/eval.h"
#include "main/state.h"
#include "main/light.h"
#include "main/api_arrayelt.h"
#include "main/draw_validate.h"
#include "main/dispatch.h"
#include "util/bitscan.h"
#include "util/u_memory.h"

#include "vbo_noop.h"
#include "vbo_private.h"


/** ID/name for immediate-mode VBO */
#define IMM_BUFFER_NAME 0xaabbccdd


static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params);

static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u);

static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v);


static void
vbo_reset_all_attr(struct vbo_exec_context *exec);


/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive. This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct gl_context *ctx = gl_context_from_vbo_exec(exec);
      unsigned last = exec->vtx.prim_count - 1;
      struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
      const bool last_begin = exec->vtx.markers[last].begin;
      GLuint last_count = 0;

      if (_mesa_inside_begin_end(ctx)) {
         last_draw->count = exec->vtx.vert_count - last_draw->start;
         last_count = last_draw->count;
         exec->vtx.markers[last].end = 0;
      }

      /* Special handling for wrapping GL_LINE_LOOP */
      if (exec->vtx.mode[last] == GL_LINE_LOOP &&
          last_count > 0 &&
          !exec->vtx.markers[last].end) {
         /* draw this section of the incomplete line loop as a line strip */
         exec->vtx.mode[last] = GL_LINE_STRIP;
         if (!last_begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex. We're saving it until we draw the
             * very last section of the loop.
             */
            last_draw->start++;
            last_draw->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(ctx)) {
         exec->vtx.mode[0] = ctx->Driver.CurrentExecPrimitive;
         exec->vtx.draw[0].start = 0;
         exec->vtx.markers[0].begin = 0;
         exec->vtx.prim_count++;

         if (exec->vtx.copied.nr == last_count)
            exec->vtx.markers[0].begin = last_begin;
      }
   }
}
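
/*
 * Sketch of the line-loop wrapping strategy used here and in
 * vbo_exec_End(): each wrapped section of an unfinished GL_LINE_LOOP is
 * drawn as a GL_LINE_STRIP, and only the final section (closed off by
 * glEnd) re-appends vertex 0 to draw the closing edge, which is why the
 * intermediate sections above skip their copy of vertex 0.
 */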


/**
 * Deal with buffer wrapping where provoked by the vertex buffer
 * filling up, as opposed to upgrade_vertex().
 */
static void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   unsigned numComponents;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
   memcpy(exec->vtx.buffer_ptr,
          exec->vtx.copied.buffer,
          numComponents * sizeof(fi_type));
   exec->vtx.buffer_ptr += numComponents;
   exec->vtx.vert_count += exec->vtx.copied.nr;

   exec->vtx.copied.nr = 0;
}


/**
 * Copy the active vertex's values to the ctx->Current fields.
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   struct vbo_context *vbo = vbo_context(ctx);
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
   bool color0_changed = false;

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul_shift = 0;

      assert(exec->vtx.attr[i].size);

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
         dmul_shift = 1;
      } else {
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      if (memcmp(current, tmp, 4 * sizeof(GLfloat) << dmul_shift) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) << dmul_shift);

         if (i == VBO_ATTRIB_COLOR0)
            color0_changed = true;

         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT) {
            ctx->NewState |= _NEW_MATERIAL;
            ctx->PopAttribState |= GL_LIGHTING_BIT;

            /* The fixed-func vertex program uses this. */
            if (i == VBO_ATTRIB_MAT_FRONT_SHININESS ||
                i == VBO_ATTRIB_MAT_BACK_SHININESS)
               ctx->NewState |= _NEW_FF_VERT_PROGRAM;
         } else {
            ctx->NewState |= _NEW_CURRENT_ATTRIB;
            ctx->PopAttribState |= GL_CURRENT_BIT;
         }
      }

      /* Given that we explicitly state size here, there is no need
       * for the COPY_CLEAN above, could just copy 16 bytes and be
       * done. The only problem is when Mesa accesses ctx->Current
       * directly.
       */
      /* Size here is in components - not bytes */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
          (exec->vtx.attr[i].size >> dmul_shift) != vbo->current[i].Format.Size) {
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size >> dmul_shift,
                               exec->vtx.attr[i].type);
      }
   }

   if (color0_changed && ctx->Light.ColorMaterialEnabled) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
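
/*
 * Note on the sizes above: attribute sizes are counted in 32-bit words,
 * so double-precision attributes (GL_DOUBLE / GL_UNSIGNED_INT64_ARB) use
 * two words per component and dmul_shift doubles the number of bytes
 * compared and copied. For example, a 3-component double attribute has
 * size 6 here and its ctx->Current slot is treated as 32 bytes rather
 * than 16.
 */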


/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize, GLenum newType)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   if (unlikely(!exec->vtx.buffer_ptr)) {
      /* We should only hit this when use_buffer_objects=true */
      assert(exec->vtx.bufferobj);
      vbo_exec_vtx_map(exec);
      assert(exec->vtx.buffer_ptr);
   }

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
                                   exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
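
/*
 * Illustrative layout (attribute set chosen arbitrarily): with COLOR0
 * (3 floats) and TEX0 (2 floats) active, exec->vtx.vertex is packed as
 *
 *    [ COLOR0.rgb | TEX0.st | POS.xyzw ]
 *
 * i.e. non-position attributes in the order they first appeared, with
 * the position always last (attrptr[0] points at the tail). Upgrading
 * TEX0 from 2 to 4 components shifts everything after it right by two
 * words, and the loop above rewrites any copied vertices into the new
 * layout.
 */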


/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call. We promote the array from size=2 to size=4.
 * \param newSize size of new vertex (number of 32-bit words).
 * \param attr VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger. Need to flush existing vertices and get
       * an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros. Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];

      exec->vtx.attr[attr].active_size = newSize;
   }
}
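
/*
 * Example of the two branches above: after a run of glTexCoord4f calls,
 * a glTexCoord2f only shrinks active_size and writes the default values
 * (nominally 0, 0, 0, 1 for float attributes) into the now-unused tail
 * components; going from glTexCoord2f to glTexCoord4f takes the full
 * wrap/upgrade path instead, since the stored vertex has to grow.
 */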


/**
 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
 * It depends on a few things, including whether we're inside or outside
 * of glBegin/glEnd.
 */
static inline bool
is_vertex_position(const struct gl_context *ctx, GLuint index)
{
   return (index == 0 &&
           _mesa_attr_zero_aliases_vertex(ctx) &&
           _mesa_inside_begin_end(ctx));
}

/* Write a 64-bit value into a 32-bit pointer by preserving endianness. */
#if UTIL_ARCH_LITTLE_ENDIAN
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (u64); \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
      } while (0)
#else
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
         *(dst32)++ = (u64); \
      } while (0)
#endif
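
/*
 * For example, storing the double 1.0 (bit pattern 0x3FF0000000000000)
 * through SET_64BIT writes the 32-bit words 0x00000000 then 0x3FF00000
 * on a little-endian target, and the reverse order on a big-endian one,
 * so the 64-bit pattern in memory is the same either way.
 */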


/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * \param A VBO_ATTRIB_x attribute index
 * \param N attribute size (1..4)
 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3 attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
   do { \
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
      int sz = (sizeof(C) / sizeof(GLfloat)); \
 \
      assert(sz == 1 || sz == 2); \
      /* store a copy of the attribute in exec except for glVertex */ \
      if ((A) != 0) { \
         /* Check if attribute size or type is changing. */ \
         if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
                      exec->vtx.attr[A].type != T)) { \
            vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
         } \
 \
         C *dest = (C *)exec->vtx.attrptr[A]; \
         if (N>0) dest[0] = V0; \
         if (N>1) dest[1] = V1; \
         if (N>2) dest[2] = V2; \
         if (N>3) dest[3] = V3; \
         assert(exec->vtx.attr[A].type == T); \
 \
         /* we now have accumulated a per-vertex attribute */ \
         ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
      } else { \
         /* This is a glVertex call */ \
         int size = exec->vtx.attr[0].size; \
 \
         /* Check if attribute size or type is changing. */ \
         if (unlikely(size < N * sz || \
                      exec->vtx.attr[0].type != T)) { \
            vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T); \
         } \
 \
         uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
         uint32_t *src = (uint32_t *)exec->vtx.vertex; \
         unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
 \
         /* Copy over attributes from exec. */ \
         for (unsigned i = 0; i < vertex_size_no_pos; i++) \
            *dst++ = *src++; \
 \
         /* Store the position, which is always last and can have 32 or */ \
         /* 64 bits per channel. */ \
         if (sizeof(C) == 4) { \
            if (N > 0) *dst++ = V0; \
            if (N > 1) *dst++ = V1; \
            if (N > 2) *dst++ = V2; \
            if (N > 3) *dst++ = V3; \
 \
            if (unlikely(N < size)) { \
               if (N < 2 && size >= 2) *dst++ = V1; \
               if (N < 3 && size >= 3) *dst++ = V2; \
               if (N < 4 && size >= 4) *dst++ = V3; \
            } \
         } else { \
            /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
            /* separately */ \
            if (N > 0) SET_64BIT(dst, V0); \
            if (N > 1) SET_64BIT(dst, V1); \
            if (N > 2) SET_64BIT(dst, V2); \
            if (N > 3) SET_64BIT(dst, V3); \
 \
            if (unlikely(N * 2 < size)) { \
               if (N < 2 && size >= 4) SET_64BIT(dst, V1); \
               if (N < 3 && size >= 6) SET_64BIT(dst, V2); \
               if (N < 4 && size >= 8) SET_64BIT(dst, V3); \
            } \
         } \
 \
         /* dst now points at the beginning of the next vertex */ \
         exec->vtx.buffer_ptr = (fi_type*)dst; \
 \
         /* Don't set FLUSH_UPDATE_CURRENT because */ \
         /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
 \
         if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
            vbo_exec_vtx_wrap(exec); \
      } \
   } while (0)
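
/*
 * Usage sketch: a call like glColor3f() takes the (A) != 0 path and only
 * overwrites exec->vtx.attrptr[VBO_ATTRIB_COLOR0]; the next glVertex*()
 * call takes the (A) == 0 path, copies every accumulated attribute plus
 * the position into exec->vtx.buffer_ptr and bumps vert_count. In other
 * words, a vertex is only emitted when the position attribute is
 * written.
 */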


#undef ERROR
#define ERROR(err) _mesa_error(ctx, err, __func__)
#define TAG(x) vbo_exec_##x

#include "vbo_attrib_tmp.h"


/**
 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out of range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
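
/*
 * Example of the color-material filtering above: with
 * glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE) enabled,
 * a glMaterialfv(GL_FRONT, GL_DIFFUSE, ...) call is masked out of
 * updateMats because that material component is currently being driven
 * by glColor.
 */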


/**
 * Flush (draw) vertices.
 *
 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   if (flags & FLUSH_STORED_VERTICES) {
      if (exec->vtx.vert_count) {
         vbo_exec_vtx_flush(exec);
      }

      if (exec->vtx.vertex_size) {
         vbo_exec_copy_to_current(exec);
         vbo_reset_all_attr(exec);
      }

      /* All done. */
      ctx->Driver.NeedFlush = 0;
   } else {
      assert(flags == FLUSH_UPDATE_CURRENT);

      /* Note that the vertex size is unchanged.
       * (vbo_reset_all_attr isn't called)
       */
      vbo_exec_copy_to_current(exec);

      /* Only FLUSH_UPDATE_CURRENT is done. */
      ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT;
   }
}


static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
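
/*
 * The memcpy pair above saves and restores exec->vtx.vertex around the
 * evaluator, presumably because vbo_exec_do_EvalCoord1f() emits the
 * evaluated vertex through the regular attribute entrypoints and would
 * otherwise leave the evaluated values behind as the current attribute
 * values for subsequent immediate-mode vertices.
 */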


static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}


static void GLAPIENTRY
vbo_exec_EvalCoord1fv(const GLfloat *u)
{
   vbo_exec_EvalCoord1f(u[0]);
}


static void GLAPIENTRY
vbo_exec_EvalCoord2fv(const GLfloat *u)
{
   vbo_exec_EvalCoord2f(u[0], u[1]);
}


static void GLAPIENTRY
vbo_exec_EvalPoint1(GLint i)
{
   GET_CURRENT_CONTEXT(ctx);
   GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
                 (GLfloat) ctx->Eval.MapGrid1un);
   GLfloat u = i * du + ctx->Eval.MapGrid1u1;

   vbo_exec_EvalCoord1f(u);
}


static void GLAPIENTRY
vbo_exec_EvalPoint2(GLint i, GLint j)
{
   GET_CURRENT_CONTEXT(ctx);
   GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
                 (GLfloat) ctx->Eval.MapGrid2un);
   GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
                 (GLfloat) ctx->Eval.MapGrid2vn);
   GLfloat u = i * du + ctx->Eval.MapGrid2u1;
   GLfloat v = j * dv + ctx->Eval.MapGrid2v1;

   vbo_exec_EvalCoord2f(u, v);
}


/**
 * Called via glBegin.
 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (ctx->NewState)
      _mesa_update_state(ctx);

   GLenum error = _mesa_valid_prim_mode(ctx, mode);
   if (error != GL_NO_ERROR) {
      _mesa_error(ctx, error, "glBegin");
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    *
    * Use FLUSH_STORED_VERTICES, because it updates current attribs and
    * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);

   i = exec->vtx.prim_count++;
   exec->vtx.mode[i] = mode;
   exec->vtx.draw[i].start = exec->vtx.vert_count;
   exec->vtx.markers[i].begin = 1;

   ctx->Driver.CurrentExecPrimitive = mode;

   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
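
/*
 * Dispatch note: switching ctx->Exec to ctx->BeginEnd here (and back in
 * vbo_exec_End) is how the API is restricted to the subset of calls that
 * are legal inside a glBegin/glEnd pair; the display-list dispatch table
 * (ctx->Save) is deliberately left alone, as the comment above explains.
 */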


/**
 * Try to merge / concatenate the two most recent VBO primitives.
 */
static void
try_vbo_merge(struct vbo_exec_context *exec)
{
   unsigned cur = exec->vtx.prim_count - 1;

   assert(exec->vtx.prim_count >= 1);

   vbo_try_prim_conversion(&exec->vtx.mode[cur], &exec->vtx.draw[cur].count);

   if (exec->vtx.prim_count >= 2) {
      struct gl_context *ctx = gl_context_from_vbo_exec(exec);
      unsigned prev = cur - 1;

      if (vbo_merge_draws(ctx, false,
                          exec->vtx.mode[prev],
                          exec->vtx.mode[cur],
                          exec->vtx.draw[prev].start,
                          exec->vtx.draw[cur].start,
                          &exec->vtx.draw[prev].count,
                          exec->vtx.draw[cur].count,
                          0, 0,
                          &exec->vtx.markers[prev].end,
                          exec->vtx.markers[cur].begin,
                          exec->vtx.markers[cur].end))
         exec->vtx.prim_count--; /* drop the last primitive */
   }
}
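
/*
 * For example, two back-to-back glBegin(GL_TRIANGLES)/glEnd() batches
 * that end up contiguous in the vertex buffer can typically be folded
 * into a single draw by vbo_merge_draws(), keeping prim_count and the
 * number of driver draw calls down.
 */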


/**
 * Called via glEnd.
 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      unsigned last = exec->vtx.prim_count - 1;
      struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
      unsigned count = exec->vtx.vert_count - last_draw->start;

      last_draw->count = count;
      exec->vtx.markers[last].end = 1;

      if (count)
         ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;

      /* Special handling for GL_LINE_LOOP */
      if (exec->vtx.mode[last] == GL_LINE_LOOP &&
          exec->vtx.markers[last].begin == 0) {
         /* We're finishing drawing a line loop. Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_draw->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_draw->start++; /* skip vertex0 */
         /* note that the count stays unchanged */
         exec->vtx.mode[last] = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
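
/*
 * Note that closing a GL_LINE_LOOP above consumes one extra vertex slot
 * in the buffer (the re-appended copy of vertex 0), which is why
 * vert_count and buffer_ptr are advanced even though no new API-level
 * vertex was issued.
 */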


/**
 * Called via glPrimitiveRestartNV()
 */
static void GLAPIENTRY
vbo_exec_PrimitiveRestartNV(void)
{
   GLenum curPrim;
   GET_CURRENT_CONTEXT(ctx);

   curPrim = ctx->Driver.CurrentExecPrimitive;

   if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
   }
   else {
      vbo_exec_End();
      vbo_exec_Begin(curPrim);
   }
}


static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   GLvertexformat *vfmt = &exec->vtxfmt;

#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}


static void
vbo_reset_all_attr(struct vbo_exec_context *exec)
{
   while (exec->vtx.enabled) {
      const int i = u_bit_scan64(&exec->vtx.enabled);

      /* Reset the vertex attribute by setting its size to zero. */
      exec->vtx.attr[i].size = 0;
      exec->vtx.attr[i].type = GL_FLOAT;
      exec->vtx.attr[i].active_size = 0;
      exec->vtx.attrptr[i] = NULL;
   }

   exec->vtx.vertex_size = 0;
}


void
vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   if (use_buffer_objects) {
      /* Use buffer objects for immediate mode. */
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
      exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);
   } else {
      /* Use allocated memory for immediate mode. */
      exec->vtx.bufferobj = NULL;
      exec->vtx.buffer_map =
         align_malloc(ctx->Const.glBeginEndBufferSize, 64);
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }

   vbo_exec_vtxfmt_init(exec);
   _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);

   exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
   vbo_reset_all_attr(exec);

   exec->vtx.info.instance_count = 1;
   exec->vtx.info.max_index = ~0;
}
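
/*
 * Storage summary: with use_buffer_objects the immediate-mode vertices
 * accumulate in a driver buffer object (mapped on demand, see the
 * vbo_exec_vtx_map() call in vbo_exec_wrap_upgrade_vertex()), otherwise
 * in a 64-byte-aligned malloc of ctx->Const.glBeginEndBufferSize bytes
 * that is freed in vbo_exec_vtx_destroy().
 */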


void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(!exec->vtx.bufferobj ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (!exec->vtx.bufferobj) {
         align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer. Unmap first if needed.
    */
   if (exec->vtx.bufferobj &&
       _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}


/**
 * If inside glBegin()/glEnd(), this is a no-op. Otherwise, if the
 * FLUSH_STORED_VERTICES bit in \p flags is set, flushes any buffered
 * vertices; if the FLUSH_UPDATE_CURRENT bit is set, updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material.
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw). */
   vbo_exec_FlushVertices_internal(exec, flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}


void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_exec_Color4f(r, g, b, a);
}


void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_exec_Normal3f(x, y, z);
}


void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_exec_MultiTexCoord4f(target, s, t, r, q);
}


void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_exec_Materialfv(face, pname, params);
}


void GLAPIENTRY
_es_Materialf(GLenum face, GLenum pname, GLfloat param)
{
   GLfloat p[4];
   p[0] = param;
   p[1] = p[2] = p[3] = 0.0F;
   vbo_exec_Materialfv(face, pname, p);
}


/**
 * A special version of glVertexAttrib4f that does not treat index 0 as
 * VBO_ATTRIB_POS.
 */
static void
VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   GET_CURRENT_CONTEXT(ctx);
   if (index < MAX_VERTEX_GENERIC_ATTRIBS)
      ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
   else
      ERROR(GL_INVALID_VALUE);
}

void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}


void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}


void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}