1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/dlist.h"
38 #include "main/eval.h"
39 #include "main/state.h"
40 #include "main/light.h"
41 #include "main/api_arrayelt.h"
42 #include "main/draw_validate.h"
43 #include "main/dispatch.h"
44 #include "util/bitscan.h"
45 #include "util/u_memory.h"
46 #include "api_exec_decl.h"
47
48 #include "vbo_private.h"
49
50 /** ID/name for immediate-mode VBO */
51 #define IMM_BUFFER_NAME 0xaabbccdd
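/* This magic name is used only for the internal immediate-mode buffer
 * object; vbo_exec_vtx_destroy() relies on it to recognize that buffer.
 * (The odd value presumably just avoids colliding with names an
 * application would normally generate.)
 */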
52
53
54 static void
55 vbo_reset_all_attr(struct vbo_exec_context *exec);
56
57
58 /**
59 * Close off the last primitive, execute the buffer, restart the
60 * primitive. This is called when we fill a vertex buffer before
61 * hitting glEnd.
62 */
63 static void
64 vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
65 {
66 if (exec->vtx.prim_count == 0) {
67 exec->vtx.copied.nr = 0;
68 exec->vtx.vert_count = 0;
69 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
70 }
71 else {
72 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
73 unsigned last = exec->vtx.prim_count - 1;
74 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
75 const bool last_begin = exec->vtx.markers[last].begin;
76 GLuint last_count = 0;
77
78 if (_mesa_inside_begin_end(ctx)) {
79 last_draw->count = exec->vtx.vert_count - last_draw->start;
80 last_count = last_draw->count;
81 exec->vtx.markers[last].end = 0;
82 }
83
84 /* Special handling for wrapping GL_LINE_LOOP */
85 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
86 last_count > 0 &&
87 !exec->vtx.markers[last].end) {
88 /* draw this section of the incomplete line loop as a line strip */
89 exec->vtx.mode[last] = GL_LINE_STRIP;
90 if (!last_begin) {
91 /* This is not the first section of the line loop, so don't
92 * draw the 0th vertex. We're saving it until we draw the
93 * very last section of the loop.
94 */
95 last_draw->start++;
96 last_draw->count--;
97 }
98 }
99
100 /* Execute the buffer and save copied vertices.
101 */
102 if (exec->vtx.vert_count)
103 vbo_exec_vtx_flush(exec);
104 else {
105 exec->vtx.prim_count = 0;
106 exec->vtx.copied.nr = 0;
107 }
108
109 /* Emit a glBegin to start the new list.
110 */
111 assert(exec->vtx.prim_count == 0);
112
113 if (_mesa_inside_begin_end(ctx)) {
114 exec->vtx.mode[0] = ctx->Driver.CurrentExecPrimitive;
115 exec->vtx.draw[0].start = 0;
116 exec->vtx.markers[0].begin = 0;
117 exec->vtx.prim_count++;
118
119 if (exec->vtx.copied.nr == last_count)
120 exec->vtx.markers[0].begin = last_begin;
121 }
122 }
123 }
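/* Rough sketch of the wrap path (illustrative only):
 *
 *   glBegin(GL_TRIANGLE_STRIP);
 *   ... glVertex*() calls fill exec->vtx.buffer_map ...
 *     -> vbo_exec_vtx_wrap()
 *        -> vbo_exec_wrap_buffers()  closes the open primitive, draws the
 *                                    buffer and restarts the primitive at
 *                                    vertex 0 of the new buffer
 *        -> the last few vertices saved in exec->vtx.copied are then
 *           replayed at the start of the new buffer so the strip stays
 *           connected
 *   ... more glVertex*() calls ...
 *   glEnd();
 */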
124
125
126 /**
127 * Deal with buffer wrapping when it is provoked by the vertex buffer
128 * filling up, as opposed to by upgrade_vertex().
129 */
130 static void
131 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
132 {
133 unsigned numComponents;
134
135 /* Run pipeline on current vertices, copy wrapped vertices
136 * to exec->vtx.copied.
137 */
138 vbo_exec_wrap_buffers(exec);
139
140 if (!exec->vtx.buffer_ptr) {
141 /* probably ran out of memory earlier when allocating the VBO */
142 return;
143 }
144
145 /* Copy stored vertices to start of new list.
146 */
147 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
148
149 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
150 memcpy(exec->vtx.buffer_ptr,
151 exec->vtx.copied.buffer,
152 numComponents * sizeof(fi_type));
153 exec->vtx.buffer_ptr += numComponents;
154 exec->vtx.vert_count += exec->vtx.copied.nr;
155
156 exec->vtx.copied.nr = 0;
157 }
158
159
160 /**
161 * Copy the active vertex's values to the ctx->Current fields.
162 */
163 static void
164 vbo_exec_copy_to_current(struct vbo_exec_context *exec)
165 {
166 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
167 struct vbo_context *vbo = vbo_context(ctx);
168 GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
169 bool color0_changed = false;
170
171 while (enabled) {
172 const int i = u_bit_scan64(&enabled);
173
174 /* Note: the exec->vtx.current[i] pointers point into the
175 * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
176 */
177 GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
178 fi_type tmp[8]; /* space for doubles */
179 int dmul_shift = 0;
180
181 assert(exec->vtx.attr[i].size);
182
183 /* VBO_ATTRIB_SELECT_RESULT_OFFSET has no current */
184 if (!current)
185 continue;
186
187 if (exec->vtx.attr[i].type == GL_DOUBLE ||
188 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
189 memset(tmp, 0, sizeof(tmp));
190 memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
191 dmul_shift = 1;
192 } else {
193 COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
194 exec->vtx.attr[i].size,
195 exec->vtx.attrptr[i],
196 exec->vtx.attr[i].type);
197 }
198
199 if (memcmp(current, tmp, 4 * sizeof(GLfloat) << dmul_shift) != 0) {
200 memcpy(current, tmp, 4 * sizeof(GLfloat) << dmul_shift);
201
202 if (i == VBO_ATTRIB_COLOR0)
203 color0_changed = true;
204
205 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT) {
206 ctx->NewState |= _NEW_MATERIAL;
207 ctx->PopAttribState |= GL_LIGHTING_BIT;
208
209 /* The fixed-func vertex program uses this. */
210 if (i == VBO_ATTRIB_MAT_FRONT_SHININESS ||
211 i == VBO_ATTRIB_MAT_BACK_SHININESS)
212 ctx->NewState |= _NEW_FF_VERT_PROGRAM;
213 } else {
214 ctx->NewState |= _NEW_CURRENT_ATTRIB;
215 ctx->PopAttribState |= GL_CURRENT_BIT;
216 }
217 }
218
219 /* Given that we explicitly state the size here, there is no need
220 * for the COPY_CLEAN above; we could just copy 16 bytes and be
221 * done. The only problem is when Mesa accesses ctx->Current
222 * directly.
223 */
224 /* Size here is in components - not bytes */
225 if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
226 (exec->vtx.attr[i].size >> dmul_shift) != vbo->current[i].Format.Size) {
227 vbo_set_vertex_format(&vbo->current[i].Format,
228 exec->vtx.attr[i].size >> dmul_shift,
229 exec->vtx.attr[i].type);
230 }
231 }
232
233 if (color0_changed && ctx->Light.ColorMaterialEnabled) {
234 _mesa_update_color_material(ctx,
235 ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
236 }
237 }
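/* Illustrative effect (assuming glColorMaterial tracking is disabled):
 * after
 *
 *   glColor3f(1.0f, 0.0f, 0.0f);
 *   glBegin(GL_TRIANGLES); ... glEnd();
 *
 * and a flush, ctx->Current.Attrib[VBO_ATTRIB_COLOR0] holds {1, 0, 0, 1},
 * so a later glGetFloatv(GL_CURRENT_COLOR, ...) returns the expected color.
 */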
238
239
240 /**
241 * Flush existing data, set new attrib size, replay copied vertices.
242 * This is called when we transition from a small vertex attribute size
243 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
244 * We need to go back over the previous 2-component texcoords and insert
245 * zero and one values.
246 * \param attr VBO_ATTRIB_x vertex attribute value
247 */
248 static void
249 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
250 GLuint attr, GLuint newSize, GLenum newType)
251 {
252 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
253 struct vbo_context *vbo = vbo_context(ctx);
254 const GLint lastcount = exec->vtx.vert_count;
255 fi_type *old_attrptr[VBO_ATTRIB_MAX];
256 const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
257 const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
258 const GLuint oldSize = exec->vtx.attr[attr].size;
259 GLuint i;
260
261 assert(attr < VBO_ATTRIB_MAX);
262
263 if (unlikely(!exec->vtx.buffer_ptr)) {
264 /* We should only hit this when use_buffer_objects=true */
265 assert(exec->vtx.bufferobj);
266 vbo_exec_vtx_map(exec);
267 assert(exec->vtx.buffer_ptr);
268 }
269
270 /* Run pipeline on current vertices, copy wrapped vertices
271 * to exec->vtx.copied.
272 */
273 vbo_exec_wrap_buffers(exec);
274
275 if (unlikely(exec->vtx.copied.nr)) {
276 /* We're in the middle of a primitive, keep the old vertex
277 * format around to be able to translate the copied vertices to
278 * the new format.
279 */
280 memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
281 }
282
283 /* Heuristic: Attempt to isolate attributes received outside
284 * begin/end so that they don't bloat the vertices.
285 */
286 if (!_mesa_inside_begin_end(ctx) &&
287 !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
288 vbo_exec_copy_to_current(exec);
289 vbo_reset_all_attr(exec);
290 }
291
292 /* Fix up sizes:
293 */
294 exec->vtx.attr[attr].size = newSize;
295 exec->vtx.attr[attr].active_size = newSize;
296 exec->vtx.attr[attr].type = newType;
297 exec->vtx.vertex_size += newSize - oldSize;
298 exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
299 exec->vtx.max_vert = vbo_compute_max_verts(exec);
300 exec->vtx.vert_count = 0;
301 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
302 exec->vtx.enabled |= BITFIELD64_BIT(attr);
303
304 if (attr != 0) {
305 if (unlikely(oldSize)) {
306 unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;
307
308 /* If there are attribs after the resized attrib... */
309 if (offset + oldSize < old_vtx_size_no_pos) {
310 int size_diff = newSize - oldSize;
311 fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
312 fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
313 fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
314 fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;
315
316 if (size_diff < 0) {
317 /* Decreasing the size: Copy from first to last to move
318 * elements to the left.
319 */
320 fi_type *old_end = old_last + 1;
321 fi_type *old = old_first;
322 fi_type *new = new_first;
323
324 do {
325 *new++ = *old++;
326 } while (old != old_end);
327 } else {
328 /* Increasing the size: Copy from last to first to move
329 * elements to the right.
330 */
331 fi_type *old_end = old_first - 1;
332 fi_type *old = old_last;
333 fi_type *new = new_last;
334
335 do {
336 *new-- = *old--;
337 } while (old != old_end);
338 }
339
340 /* Update pointers to attribs, because we moved them. */
341 GLbitfield64 enabled = exec->vtx.enabled &
342 ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
343 ~BITFIELD64_BIT(attr);
344 while (enabled) {
345 unsigned i = u_bit_scan64(&enabled);
346
347 if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
348 exec->vtx.attrptr[i] += size_diff;
349 }
350 }
351 } else {
352 /* Just have to append the new attribute at the end */
353 exec->vtx.attrptr[attr] = exec->vtx.vertex +
354 exec->vtx.vertex_size_no_pos - newSize;
355 }
356 }
357
358 /* The position is always last. */
359 exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;
360
361 /* Translate the stored (copied) vertices to the new vertex format.
362 *
363 * No need to replay the original API calls - just copy the data
364 * piecewise into the new layout.
365 */
366 if (unlikely(exec->vtx.copied.nr)) {
367 fi_type *data = exec->vtx.copied.buffer;
368 fi_type *dest = exec->vtx.buffer_ptr;
369
370 assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);
371
372 for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
373 GLbitfield64 enabled = exec->vtx.enabled;
374 while (enabled) {
375 const int j = u_bit_scan64(&enabled);
376 GLuint sz = exec->vtx.attr[j].size;
377 GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
378 GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;
379
380 assert(sz);
381
382 if (j == attr) {
383 if (oldSize) {
384 fi_type tmp[4];
385 COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
386 data + old_offset,
387 exec->vtx.attr[j].type);
388 COPY_SZ_4V(dest + new_offset, newSize, tmp);
389 } else {
390 fi_type *current = (fi_type *)vbo->current[j].Ptr;
391 COPY_SZ_4V(dest + new_offset, sz, current);
392 }
393 }
394 else {
395 COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
396 }
397 }
398
399 data += old_vtx_size;
400 dest += exec->vtx.vertex_size;
401 }
402
403 exec->vtx.buffer_ptr = dest;
404 exec->vtx.vert_count += exec->vtx.copied.nr;
405 exec->vtx.copied.nr = 0;
406 }
407 }
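/* Worked example of the layout change above, assuming the vertex so far
 * carries a 2-component texcoord (the position is always stored last):
 *
 *   before glTexCoord4f:  [ S T     | X Y Z ]   vertex_size = 5 floats
 *   after  glTexCoord4f:  [ S T R Q | X Y Z ]   vertex_size = 7 floats
 *
 * Vertices copied out of the wrapped buffer are re-emitted in the new
 * layout, with the missing texcoord components filled from the
 * (0, 0, 0, 1) defaults by COPY_CLEAN_4V_TYPE_AS_UNION.
 */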
408
409
410 /**
411 * This is when a vertex attribute transitions to a different size.
412 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
413 * glTexCoord4f() call. We promote the array from size=2 to size=4.
414 * \param newSize new size of the attribute (number of 32-bit words).
415 * \param attr VBO_ATTRIB_x vertex attribute value
416 */
417 static void
418 vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
419 GLuint newSize, GLenum newType)
420 {
421 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
422
423 assert(attr < VBO_ATTRIB_MAX);
424
425 if (newSize > exec->vtx.attr[attr].size ||
426 newType != exec->vtx.attr[attr].type) {
427 /* The new size is larger or the type differs. Need to flush existing
428 * vertices and get an enlarged vertex format.
429 */
430 vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
431 }
432 else if (newSize < exec->vtx.attr[attr].active_size) {
433 GLuint i;
434 const fi_type *id =
435 vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);
436
437 /* The new size is smaller - just fill in the now-unused components
438 * with their default values. No need to flush or wrap.
439 */
440 for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
441 exec->vtx.attrptr[attr][i-1] = id[i-1];
442
443 exec->vtx.attr[attr].active_size = newSize;
444 }
445 }
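/* Example of the shrinking case handled above: glTexCoord4f() followed by
 * glTexCoord2f() keeps the attribute's storage at size 4, but writes the
 * default values into the now-unused 3rd and 4th components and drops
 * active_size to 2 - no flush or buffer wrap is required.
 */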
446
447
448 /**
449 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
450 * It depends on a few things, including whether we're inside or outside
451 * of glBegin/glEnd.
452 */
453 static inline bool
454 is_vertex_position(const struct gl_context *ctx, GLuint index)
455 {
456 return (index == 0 &&
457 _mesa_attr_zero_aliases_vertex(ctx) &&
458 _mesa_inside_begin_end(ctx));
459 }
460
461 /* Write a 64-bit value into a 32-bit pointer by preserving endianness. */
462 #if UTIL_ARCH_LITTLE_ENDIAN
463 #define SET_64BIT(dst32, u64) do { \
464 *(dst32)++ = (u64); \
465 *(dst32)++ = (uint64_t)(u64) >> 32; \
466 } while (0)
467 #else
468 #define SET_64BIT(dst32, u64) do { \
469 *(dst32)++ = (uint64_t)(u64) >> 32; \
470 *(dst32)++ = (u64); \
471 } while (0)
472 #endif
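/* Usage sketch: given a uint32_t pointer "dst" into the vertex buffer,
 * SET_64BIT(dst, u64) stores the two 32-bit halves of u64 (in an order
 * that depends on endianness) and advances dst by two slots, which is why
 * 64-bit attribute components count as two 32-bit words in the code below.
 */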
473
474
475 /**
476 * This macro is used to implement all the glVertex, glColor, glTexCoord,
477 * glVertexAttrib, etc functions.
478 * \param A VBO_ATTRIB_x attribute index
479 * \param N attribute size (1..4)
480 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
481 * \param C cast type (uint32_t or uint64_t)
482 * \param V0, V1, V2, V3 attribute values
483 */
484 #define ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3) \
485 do { \
486 struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
487 int sz = (sizeof(C) / sizeof(GLfloat)); \
488 \
489 assert(sz == 1 || sz == 2); \
490 /* store a copy of the attribute in exec except for glVertex */ \
491 if ((A) != 0) { \
492 /* Check if attribute size or type is changing. */ \
493 if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
494 exec->vtx.attr[A].type != T)) { \
495 vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
496 } \
497 \
498 C *dest = (C *)exec->vtx.attrptr[A]; \
499 if (N>0) dest[0] = V0; \
500 if (N>1) dest[1] = V1; \
501 if (N>2) dest[2] = V2; \
502 if (N>3) dest[3] = V3; \
503 assert(exec->vtx.attr[A].type == T); \
504 \
505 /* we now have accumulated a per-vertex attribute */ \
506 ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
507 } else { \
508 /* This is a glVertex call */ \
509 int size = exec->vtx.attr[0].size; \
510 \
511 /* Check if attribute size or type is changing. */ \
512 if (unlikely(size < N * sz || \
513 exec->vtx.attr[0].type != T)) { \
514 vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T); \
515 } \
516 \
517 uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
518 uint32_t *src = (uint32_t *)exec->vtx.vertex; \
519 unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
520 \
521 /* Copy over attributes from exec. */ \
522 for (unsigned i = 0; i < vertex_size_no_pos; i++) \
523 *dst++ = *src++; \
524 \
525 /* Store the position, which is always last and can have 32 or */ \
526 /* 64 bits per channel. */ \
527 if (sizeof(C) == 4) { \
528 if (N > 0) *dst++ = V0; \
529 if (N > 1) *dst++ = V1; \
530 if (N > 2) *dst++ = V2; \
531 if (N > 3) *dst++ = V3; \
532 \
533 if (unlikely(N < size)) { \
534 if (N < 2 && size >= 2) *dst++ = V1; \
535 if (N < 3 && size >= 3) *dst++ = V2; \
536 if (N < 4 && size >= 4) *dst++ = V3; \
537 } \
538 } else { \
539 /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
540 /* separately */ \
541 if (N > 0) SET_64BIT(dst, V0); \
542 if (N > 1) SET_64BIT(dst, V1); \
543 if (N > 2) SET_64BIT(dst, V2); \
544 if (N > 3) SET_64BIT(dst, V3); \
545 \
546 if (unlikely(N * 2 < size)) { \
547 if (N < 2 && size >= 4) SET_64BIT(dst, V1); \
548 if (N < 3 && size >= 6) SET_64BIT(dst, V2); \
549 if (N < 4 && size >= 8) SET_64BIT(dst, V3); \
550 } \
551 } \
552 \
553 /* dst now points at the beginning of the next vertex */ \
554 exec->vtx.buffer_ptr = (fi_type*)dst; \
555 \
556 /* Don't set FLUSH_UPDATE_CURRENT because */ \
557 /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
558 \
559 if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
560 vbo_exec_vtx_wrap(exec); \
561 } \
562 } while (0)
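/* Illustrative expansion of the macro above for a typical call sequence
 * (the actual entry points are generated from vbo_attrib_tmp.h):
 *
 *   glColor3f(r, g, b);  -> ATTR_UNION(VBO_ATTRIB_COLOR0, 3, GL_FLOAT, ...)
 *                           latches r, g, b in exec->vtx.attrptr[COLOR0]
 *   glVertex3f(x, y, z); -> ATTR_UNION(0, 3, GL_FLOAT, ...)
 *                           copies all latched attributes plus the position
 *                           into exec->vtx.buffer_ptr, emitting one
 *                           interleaved vertex, and wraps the buffer if it
 *                           is now full.
 */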
563
564 #undef ERROR
565 #define ERROR(err) _mesa_error(ctx, err, __func__)
566 #define TAG(x) _mesa_##x
567 #define SUPPRESS_STATIC
568
569 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
570 ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3)
571
572 #include "vbo_attrib_tmp.h"
573
574
575 /**
576 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
577 * this may be a (partial) no-op.
578 */
579 void GLAPIENTRY
580 _mesa_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
581 {
582 GLbitfield updateMats;
583 GET_CURRENT_CONTEXT(ctx);
584
585 /* This function should be a no-op when it tries to update material
586 * attributes which are currently tracking glColor via glColorMaterial.
587 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
588 * indicating which material attributes can actually be updated below.
589 */
590 if (ctx->Light.ColorMaterialEnabled) {
591 updateMats = ~ctx->Light._ColorMaterialBitmask;
592 }
593 else {
594 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
595 updateMats = ALL_MATERIAL_BITS;
596 }
597
598 if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
599 updateMats &= FRONT_MATERIAL_BITS;
600 }
601 else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
602 updateMats &= BACK_MATERIAL_BITS;
603 }
604 else if (face != GL_FRONT_AND_BACK) {
605 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
606 return;
607 }
608
609 switch (pname) {
610 case GL_EMISSION:
611 if (updateMats & MAT_BIT_FRONT_EMISSION)
612 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
613 if (updateMats & MAT_BIT_BACK_EMISSION)
614 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
615 break;
616 case GL_AMBIENT:
617 if (updateMats & MAT_BIT_FRONT_AMBIENT)
618 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
619 if (updateMats & MAT_BIT_BACK_AMBIENT)
620 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
621 break;
622 case GL_DIFFUSE:
623 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
624 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
625 if (updateMats & MAT_BIT_BACK_DIFFUSE)
626 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
627 break;
628 case GL_SPECULAR:
629 if (updateMats & MAT_BIT_FRONT_SPECULAR)
630 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
631 if (updateMats & MAT_BIT_BACK_SPECULAR)
632 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
633 break;
634 case GL_SHININESS:
635 if (*params < 0 || *params > ctx->Const.MaxShininess) {
636 _mesa_error(ctx, GL_INVALID_VALUE,
637 "glMaterial(invalid shininess: %f out range [0, %f])",
638 *params, ctx->Const.MaxShininess);
639 return;
640 }
641 if (updateMats & MAT_BIT_FRONT_SHININESS)
642 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
643 if (updateMats & MAT_BIT_BACK_SHININESS)
644 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
645 break;
646 case GL_COLOR_INDEXES:
647 if (ctx->API != API_OPENGL_COMPAT) {
648 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
649 return;
650 }
651 if (updateMats & MAT_BIT_FRONT_INDEXES)
652 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
653 if (updateMats & MAT_BIT_BACK_INDEXES)
654 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
655 break;
656 case GL_AMBIENT_AND_DIFFUSE:
657 if (updateMats & MAT_BIT_FRONT_AMBIENT)
658 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
659 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
660 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
661 if (updateMats & MAT_BIT_BACK_AMBIENT)
662 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
663 if (updateMats & MAT_BIT_BACK_DIFFUSE)
664 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
665 break;
666 default:
667 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
668 return;
669 }
670 }
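/* Example of the glColorMaterial interaction: with GL_COLOR_MATERIAL
 * enabled and glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
 * (the default mode), a glMaterialfv(GL_FRONT, GL_DIFFUSE, ...) call is
 * filtered out by the updateMats mask above, because the diffuse material
 * is currently tracking glColor.
 */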
671
672
673 /**
674 * Flush (draw) vertices.
675 *
676 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
677 */
678 static void
679 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
680 {
681 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
682
683 if (flags & FLUSH_STORED_VERTICES) {
684 if (exec->vtx.vert_count) {
685 vbo_exec_vtx_flush(exec);
686 }
687
688 if (exec->vtx.vertex_size) {
689 vbo_exec_copy_to_current(exec);
690 vbo_reset_all_attr(exec);
691 }
692
693 /* All done. */
694 ctx->Driver.NeedFlush = 0;
695 } else {
696 assert(flags == FLUSH_UPDATE_CURRENT);
697
698 /* Note that the vertex size is unchanged.
699 * (vbo_reset_all_attr isn't called)
700 */
701 vbo_exec_copy_to_current(exec);
702
703 /* Only FLUSH_UPDATE_CURRENT is done. */
704 ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT;
705 }
706 }
707
708
709 void GLAPIENTRY
710 _mesa_EvalCoord1f(GLfloat u)
711 {
712 GET_CURRENT_CONTEXT(ctx);
713 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
714
715 {
716 GLint i;
717 if (exec->eval.recalculate_maps)
718 vbo_exec_eval_update(exec);
719
720 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
721 if (exec->eval.map1[i].map)
722 if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
723 vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
724 }
725 }
726
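/* The evaluator emits its vertex through the same ATTR paths and thus
 * overwrites the latched attribute values in exec->vtx.vertex; the
 * memcpy pair around the call saves and restores them so that later
 * vertices still use the application-supplied attributes.
 */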
727 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
728 exec->vtx.vertex_size * sizeof(GLfloat));
729
730 vbo_exec_do_EvalCoord1f(exec, u);
731
732 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
733 exec->vtx.vertex_size * sizeof(GLfloat));
734 }
735
736
737 void GLAPIENTRY
738 _mesa_EvalCoord2f(GLfloat u, GLfloat v)
739 {
740 GET_CURRENT_CONTEXT(ctx);
741 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
742
743 {
744 GLint i;
745 if (exec->eval.recalculate_maps)
746 vbo_exec_eval_update(exec);
747
748 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
749 if (exec->eval.map2[i].map)
750 if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
751 vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
752 }
753
754 if (ctx->Eval.AutoNormal)
755 if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
756 vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
757 }
758
759 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
760 exec->vtx.vertex_size * sizeof(GLfloat));
761
762 vbo_exec_do_EvalCoord2f(exec, u, v);
763
764 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
765 exec->vtx.vertex_size * sizeof(GLfloat));
766 }
767
768
769 void GLAPIENTRY
770 _mesa_EvalCoord1fv(const GLfloat *u)
771 {
772 _mesa_EvalCoord1f(u[0]);
773 }
774
775
776 void GLAPIENTRY
777 _mesa_EvalCoord2fv(const GLfloat *u)
778 {
779 _mesa_EvalCoord2f(u[0], u[1]);
780 }
781
782
783 void GLAPIENTRY
784 _mesa_EvalPoint1(GLint i)
785 {
786 GET_CURRENT_CONTEXT(ctx);
787 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
788 (GLfloat) ctx->Eval.MapGrid1un);
789 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
790
791 _mesa_EvalCoord1f(u);
792 }
793
794
795 void GLAPIENTRY
796 _mesa_EvalPoint2(GLint i, GLint j)
797 {
798 GET_CURRENT_CONTEXT(ctx);
799 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
800 (GLfloat) ctx->Eval.MapGrid2un);
801 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
802 (GLfloat) ctx->Eval.MapGrid2vn);
803 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
804 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
805
806 _mesa_EvalCoord2f(u, v);
807 }
808
809
810 /**
811 * Called via glBegin.
812 */
813 void GLAPIENTRY
814 _mesa_Begin(GLenum mode)
815 {
816 GET_CURRENT_CONTEXT(ctx);
817 struct vbo_context *vbo = vbo_context(ctx);
818 struct vbo_exec_context *exec = &vbo->exec;
819 int i;
820
821 if (_mesa_inside_begin_end(ctx)) {
822 _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
823 return;
824 }
825
826 if (ctx->NewState)
827 _mesa_update_state(ctx);
828
829 GLenum error = _mesa_valid_prim_mode(ctx, mode);
830 if (error != GL_NO_ERROR) {
831 _mesa_error(ctx, error, "glBegin");
832 return;
833 }
834
835 /* Heuristic: attempt to isolate attributes occurring outside
836 * begin/end pairs.
837 *
838 * Use FLUSH_STORED_VERTICES, because it updates current attribs and
839 * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
840 */
841 if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
842 vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);
843
844 i = exec->vtx.prim_count++;
845 exec->vtx.mode[i] = mode;
846 exec->vtx.draw[i].start = exec->vtx.vert_count;
847 exec->vtx.markers[i].begin = 1;
848
849 ctx->Driver.CurrentExecPrimitive = mode;
850
851 ctx->Exec = _mesa_hw_select_enabled(ctx) ?
852 ctx->HWSelectModeBeginEnd : ctx->BeginEnd;
853
854 /* We may have been called from a display list, in which case we should
855 * leave dlist.c's dispatch table in place.
856 */
857 if (ctx->GLThread.enabled) {
858 if (ctx->CurrentServerDispatch == ctx->OutsideBeginEnd)
859 ctx->CurrentServerDispatch = ctx->Exec;
860 } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
861 ctx->CurrentClientDispatch = ctx->CurrentServerDispatch = ctx->Exec;
862 _glapi_set_dispatch(ctx->CurrentClientDispatch);
863 } else {
864 assert(ctx->CurrentClientDispatch == ctx->Save);
865 }
866 }
867
868
869 /**
870 * Try to merge / concatenate the two most recent VBO primitives.
871 */
872 static void
873 try_vbo_merge(struct vbo_exec_context *exec)
874 {
875 unsigned cur = exec->vtx.prim_count - 1;
876
877 assert(exec->vtx.prim_count >= 1);
878
879 vbo_try_prim_conversion(&exec->vtx.mode[cur], &exec->vtx.draw[cur].count);
880
881 if (exec->vtx.prim_count >= 2) {
882 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
883 unsigned prev = cur - 1;
884
885 if (vbo_merge_draws(ctx, false,
886 exec->vtx.mode[prev],
887 exec->vtx.mode[cur],
888 exec->vtx.draw[prev].start,
889 exec->vtx.draw[cur].start,
890 &exec->vtx.draw[prev].count,
891 exec->vtx.draw[cur].count,
892 0, 0,
893 &exec->vtx.markers[prev].end,
894 exec->vtx.markers[cur].begin,
895 exec->vtx.markers[cur].end))
896 exec->vtx.prim_count--; /* drop the last primitive */
897 }
898 }
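/* For example (illustrative): two back-to-back glBegin(GL_TRIANGLES)/glEnd()
 * pairs whose vertex ranges are contiguous in the buffer can be merged by
 * vbo_merge_draws() into a single draw, reducing the number of draws
 * submitted for this buffer.
 */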
899
900
901 /**
902 * Called via glEnd.
903 */
904 void GLAPIENTRY
905 _mesa_End(void)
906 {
907 GET_CURRENT_CONTEXT(ctx);
908 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
909
910 if (!_mesa_inside_begin_end(ctx)) {
911 _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
912 return;
913 }
914
915 ctx->Exec = ctx->OutsideBeginEnd;
916
917 if (ctx->GLThread.enabled) {
918 if (ctx->CurrentServerDispatch == ctx->BeginEnd ||
919 ctx->CurrentServerDispatch == ctx->HWSelectModeBeginEnd) {
920 ctx->CurrentServerDispatch = ctx->Exec;
921 }
922 } else if (ctx->CurrentClientDispatch == ctx->BeginEnd ||
923 ctx->CurrentClientDispatch == ctx->HWSelectModeBeginEnd) {
924 ctx->CurrentClientDispatch = ctx->CurrentServerDispatch = ctx->Exec;
925 _glapi_set_dispatch(ctx->CurrentClientDispatch);
926 }
927
928 if (exec->vtx.prim_count > 0) {
929 /* close off current primitive */
930 unsigned last = exec->vtx.prim_count - 1;
931 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
932 unsigned count = exec->vtx.vert_count - last_draw->start;
933
934 last_draw->count = count;
935 exec->vtx.markers[last].end = 1;
936
937 if (count) {
938 /* mark result buffer used */
939 if (_mesa_hw_select_enabled(ctx))
940 ctx->Select.ResultUsed = GL_TRUE;
941
942 ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
943 }
944
945 /* Special handling for GL_LINE_LOOP */
946 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
947 exec->vtx.markers[last].begin == 0) {
948 /* We're finishing drawing a line loop. Append 0th vertex onto
949 * end of vertex buffer so we can draw it as a line strip.
950 */
951 const fi_type *src = exec->vtx.buffer_map +
952 last_draw->start * exec->vtx.vertex_size;
953 fi_type *dst = exec->vtx.buffer_map +
954 exec->vtx.vert_count * exec->vtx.vertex_size;
955
956 /* copy 0th vertex to end of buffer */
957 memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));
958
959 last_draw->start++; /* skip vertex0 */
960 /* note that the count stays unchanged */
961 exec->vtx.mode[last] = GL_LINE_STRIP;
962
963 /* Increment the vertex count so the next primitive doesn't
964 * overwrite the last vertex which we just added.
965 */
966 exec->vtx.vert_count++;
967 exec->vtx.buffer_ptr += exec->vtx.vertex_size;
968 }
969
970 try_vbo_merge(exec);
971 }
972
973 ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;
974
975 if (exec->vtx.prim_count == VBO_MAX_PRIM)
976 vbo_exec_vtx_flush(exec);
977
978 if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
979 _mesa_flush(ctx);
980 }
981 }
982
983
984 /**
985 * Called via glPrimitiveRestartNV()
986 */
987 void GLAPIENTRY
988 _mesa_PrimitiveRestartNV(void)
989 {
990 GLenum curPrim;
991 GET_CURRENT_CONTEXT(ctx);
992
993 curPrim = ctx->Driver.CurrentExecPrimitive;
994
995 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
996 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
997 }
998 else {
999 _mesa_End();
1000 _mesa_Begin(curPrim);
1001 }
1002 }
1003
1004
1005 /**
1006 * A special version of glVertexAttrib4f that does not treat index 0 as
1007 * VBO_ATTRIB_POS.
1008 */
1009 static void
1010 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1011 {
1012 GET_CURRENT_CONTEXT(ctx);
1013 if (index < ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs)
1014 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1015 else
1016 ERROR(GL_INVALID_VALUE);
1017 }
1018
1019 static void GLAPIENTRY
1020 _es_VertexAttrib4fARB(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1021 {
1022 VertexAttrib4f_nopos(index, x, y, z, w);
1023 }
1024
1025
1026 static void GLAPIENTRY
1027 _es_VertexAttrib1fARB(GLuint indx, GLfloat x)
1028 {
1029 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1030 }
1031
1032
1033 static void GLAPIENTRY
1034 _es_VertexAttrib1fvARB(GLuint indx, const GLfloat* values)
1035 {
1036 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1037 }
1038
1039
1040 static void GLAPIENTRY
1041 _es_VertexAttrib2fARB(GLuint indx, GLfloat x, GLfloat y)
1042 {
1043 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1044 }
1045
1046
1047 static void GLAPIENTRY
1048 _es_VertexAttrib2fvARB(GLuint indx, const GLfloat* values)
1049 {
1050 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1051 }
1052
1053
1054 static void GLAPIENTRY
1055 _es_VertexAttrib3fARB(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1056 {
1057 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1058 }
1059
1060
1061 static void GLAPIENTRY
1062 _es_VertexAttrib3fvARB(GLuint indx, const GLfloat* values)
1063 {
1064 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1065 }
1066
1067
1068 static void GLAPIENTRY
1069 _es_VertexAttrib4fvARB(GLuint indx, const GLfloat* values)
1070 {
1071 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1072 }
1073
1074
1075 void
1076 vbo_install_exec_vtxfmt(struct gl_context *ctx)
1077 {
1078 #define NAME_AE(x) _mesa_##x
1079 #define NAME_CALLLIST(x) _mesa_##x
1080 #define NAME(x) _mesa_##x
1081 #define NAME_ES(x) _es_##x
1082
1083 struct _glapi_table *tab = ctx->Exec;
1084 #include "api_vtxfmt_init.h"
1085
1086 if (ctx->BeginEnd) {
1087 tab = ctx->BeginEnd;
1088 #include "api_vtxfmt_init.h"
1089 }
1090 }
1091
1092
1093 static void
1094 vbo_reset_all_attr(struct vbo_exec_context *exec)
1095 {
1096 while (exec->vtx.enabled) {
1097 const int i = u_bit_scan64(&exec->vtx.enabled);
1098
1099 /* Reset the vertex attribute by setting its size to zero. */
1100 exec->vtx.attr[i].size = 0;
1101 exec->vtx.attr[i].type = GL_FLOAT;
1102 exec->vtx.attr[i].active_size = 0;
1103 exec->vtx.attrptr[i] = NULL;
1104 }
1105
1106 exec->vtx.vertex_size = 0;
1107 }
1108
1109
1110 void
1111 vbo_exec_vtx_init(struct vbo_exec_context *exec)
1112 {
1113 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1114
1115 exec->vtx.bufferobj = _mesa_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
1116
1117 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1118 vbo_reset_all_attr(exec);
1119
1120 exec->vtx.info.instance_count = 1;
1121 exec->vtx.info.max_index = ~0;
1122 }
1123
1124
1125 void
1126 vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
1127 {
1128 /* using a real VBO for vertex data */
1129 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1130
1131 /* True VBOs should already be unmapped
1132 */
1133 if (exec->vtx.buffer_map) {
1134 assert(!exec->vtx.bufferobj ||
1135 exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
1136 if (!exec->vtx.bufferobj) {
1137 align_free(exec->vtx.buffer_map);
1138 exec->vtx.buffer_map = NULL;
1139 exec->vtx.buffer_ptr = NULL;
1140 }
1141 }
1142
1143 /* Free the vertex buffer. Unmap first if needed.
1144 */
1145 if (exec->vtx.bufferobj &&
1146 _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
1147 _mesa_bufferobj_unmap(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
1148 }
1149 _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
1150 }
1151
1152
1153 /**
1154 * If we are inside glBegin()/glEnd(), this should assert(0). Otherwise,
1155 * if the FLUSH_STORED_VERTICES bit in \p flags is set, flush any buffered
1156 * vertices; if the FLUSH_UPDATE_CURRENT bit is set, update
1157 * __struct gl_contextRec::Current and gl_light_attrib::Material.
1158 *
1159 * Note that the default T&L engine never clears the
1160 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1161 *
1162 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1163 */
1164 void
1165 vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
1166 {
1167 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1168
1169 #ifndef NDEBUG
1170 /* debug check: make sure we don't get called recursively */
1171 exec->flush_call_depth++;
1172 assert(exec->flush_call_depth == 1);
1173 #endif
1174
1175 if (_mesa_inside_begin_end(ctx)) {
1176 /* We've had glBegin but not glEnd! */
1177 #ifndef NDEBUG
1178 exec->flush_call_depth--;
1179 assert(exec->flush_call_depth == 0);
1180 #endif
1181 return;
1182 }
1183
1184 /* Flush (draw). */
1185 vbo_exec_FlushVertices_internal(exec, flags);
1186
1187 #ifndef NDEBUG
1188 exec->flush_call_depth--;
1189 assert(exec->flush_call_depth == 0);
1190 #endif
1191 }
1192
1193
1194 void GLAPIENTRY
1195 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1196 {
1197 _mesa_Color4f(r, g, b, a);
1198 }
1199
1200
1201 void GLAPIENTRY
1202 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1203 {
1204 _mesa_Normal3f(x, y, z);
1205 }
1206
1207
1208 void GLAPIENTRY
1209 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1210 {
1211 _mesa_MultiTexCoord4fARB(target, s, t, r, q);
1212 }
1213
1214
1215 void GLAPIENTRY
1216 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1217 {
1218 _mesa_Materialfv(face, pname, params);
1219 }
1220
1221
1222 void GLAPIENTRY
1223 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1224 {
1225 GLfloat p[4];
1226 p[0] = param;
1227 p[1] = p[2] = p[3] = 0.0F;
1228 _mesa_Materialfv(face, pname, p);
1229 }
1230
1231 #undef TAG
1232 #undef SUPPRESS_STATIC
1233 #define TAG(x) _hw_select_##x
1234 /* filter out non-vertex API functions */
1235 #define HW_SELECT_MODE
1236
1237 #undef ATTR_UNION
1238 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
1239 do { \
1240 if ((A) == 0) { \
1241 ATTR_UNION_BASE(VBO_ATTRIB_SELECT_RESULT_OFFSET, 1, GL_UNSIGNED_INT, uint32_t, \
1242 ctx->Select.ResultOffset, 0, 0, 0); \
1243 } \
1244 ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3); \
1245 } while (0)
1246
1247 #include "vbo_attrib_tmp.h"
1248
1249 void
1250 vbo_install_hw_select_begin_end(struct gl_context *ctx)
1251 {
1252 int numEntries = MAX2(_gloffset_COUNT, _glapi_get_dispatch_table_size());
1253 memcpy(ctx->HWSelectModeBeginEnd, ctx->BeginEnd, numEntries * sizeof(_glapi_proc));
1254
1255 #undef NAME
1256 #define NAME(x) _hw_select_##x
1257 struct _glapi_table *tab = ctx->HWSelectModeBeginEnd;
1258 #include "api_hw_select_init.h"
1259 }
1260