1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "util/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/dlist.h"
38 #include "main/eval.h"
39 #include "main/state.h"
40 #include "main/light.h"
41 #include "main/api_arrayelt.h"
42 #include "main/draw_validate.h"
43 #include "main/dispatch.h"
44 #include "util/bitscan.h"
45 #include "util/u_memory.h"
46 #include "api_exec_decl.h"
47
48 #include "vbo_private.h"
49
50 /** ID/name for immediate-mode VBO */
51 #define IMM_BUFFER_NAME 0xaabbccdd
52
53
54 static void
55 vbo_reset_all_attr(struct vbo_exec_context *exec);
56
57
58 /**
59 * Close off the last primitive, execute the buffer, restart the
60 * primitive. This is called when we fill a vertex buffer before
61 * hitting glEnd.
62 */
63 static void
vbo_exec_wrap_buffers(struct vbo_exec_context * exec)64 vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
65 {
66 if (exec->vtx.prim_count == 0) {
67 exec->vtx.copied.nr = 0;
68 exec->vtx.vert_count = 0;
69 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
70 }
71 else {
72 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
73 unsigned last = exec->vtx.prim_count - 1;
74 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
75 const bool last_begin = exec->vtx.markers[last].begin;
76 GLuint last_count = 0;
77
78 if (_mesa_inside_begin_end(ctx)) {
79 last_draw->count = exec->vtx.vert_count - last_draw->start;
80 last_count = last_draw->count;
81 exec->vtx.markers[last].end = 0;
82 }
83
84 /* Special handling for wrapping GL_LINE_LOOP */
85 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
86 last_count > 0 &&
87 !exec->vtx.markers[last].end) {
88 /* draw this section of the incomplete line loop as a line strip */
89 exec->vtx.mode[last] = GL_LINE_STRIP;
90 if (!last_begin) {
91 /* This is not the first section of the line loop, so don't
92 * draw the 0th vertex. We're saving it until we draw the
93 * very last section of the loop.
94 */
95 last_draw->start++;
96 last_draw->count--;
97 }
98 }
99
100 /* Execute the buffer and save copied vertices.
101 */
102 if (exec->vtx.vert_count)
103 vbo_exec_vtx_flush(exec);
104 else {
105 exec->vtx.prim_count = 0;
106 exec->vtx.copied.nr = 0;
107 }
108
109 /* Emit a glBegin to start the new list.
110 */
111 assert(exec->vtx.prim_count == 0);
112
113 if (_mesa_inside_begin_end(ctx)) {
114 exec->vtx.mode[0] = ctx->Driver.CurrentExecPrimitive;
115 exec->vtx.draw[0].start = 0;
116 exec->vtx.markers[0].begin = 0;
117 exec->vtx.prim_count++;
118
119 if (exec->vtx.copied.nr == last_count)
120 exec->vtx.markers[0].begin = last_begin;
121 }
122 }
123 }
124
125
126 /**
127 * Deal with buffer wrapping where provoked by the vertex buffer
128 * filling up, as opposed to upgrade_vertex().
129 */
130 static void
vbo_exec_vtx_wrap(struct vbo_exec_context * exec)131 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
132 {
133 unsigned numComponents;
134
135 /* Run pipeline on current vertices, copy wrapped vertices
136 * to exec->vtx.copied.
137 */
138 vbo_exec_wrap_buffers(exec);
139
140 if (!exec->vtx.buffer_ptr) {
141 /* probably ran out of memory earlier when allocating the VBO */
142 return;
143 }
144
145 /* Copy stored stored vertices to start of new list.
146 */
147 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
148
149 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
150 memcpy(exec->vtx.buffer_ptr,
151 exec->vtx.copied.buffer,
152 numComponents * sizeof(fi_type));
153 exec->vtx.buffer_ptr += numComponents;
154 exec->vtx.vert_count += exec->vtx.copied.nr;
155
156 exec->vtx.copied.nr = 0;
157 }
158
159
160 /**
161 * Copy the active vertex's values to the ctx->Current fields.
162 */
163 static void
vbo_exec_copy_to_current(struct vbo_exec_context * exec)164 vbo_exec_copy_to_current(struct vbo_exec_context *exec)
165 {
166 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
167 struct vbo_context *vbo = vbo_context(ctx);
168 GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
169 bool color0_changed = false;
170
171 while (enabled) {
172 const int i = u_bit_scan64(&enabled);
173
174 /* Note: the exec->vtx.current[i] pointers point into the
175 * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
176 */
177 GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
178 fi_type tmp[8]; /* space for doubles */
179 int dmul_shift = 0;
180
181 assert(exec->vtx.attr[i].size);
182
183 /* VBO_ATTRIB_SELECT_RESULT_INDEX has no current */
184 if (!current)
185 continue;
186
187 if (exec->vtx.attr[i].type == GL_DOUBLE ||
188 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
189 memset(tmp, 0, sizeof(tmp));
190 memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
191 dmul_shift = 1;
192 } else {
193 COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
194 exec->vtx.attr[i].size,
195 exec->vtx.attrptr[i],
196 exec->vtx.attr[i].type);
197 }
198
199 if (memcmp(current, tmp, 4 * sizeof(GLfloat) << dmul_shift) != 0) {
200 memcpy(current, tmp, 4 * sizeof(GLfloat) << dmul_shift);
201
202 if (i == VBO_ATTRIB_COLOR0)
203 color0_changed = true;
204
205 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT) {
206 ctx->NewState |= _NEW_MATERIAL;
207 ctx->PopAttribState |= GL_LIGHTING_BIT;
208
209 /* The fixed-func vertex program uses this. */
210 if (i == VBO_ATTRIB_MAT_FRONT_SHININESS ||
211 i == VBO_ATTRIB_MAT_BACK_SHININESS)
212 ctx->NewState |= _NEW_FF_VERT_PROGRAM;
213 } else {
214 if (i == VBO_ATTRIB_EDGEFLAG)
215 _mesa_update_edgeflag_state_vao(ctx);
216
217 ctx->NewState |= _NEW_CURRENT_ATTRIB;
218 ctx->PopAttribState |= GL_CURRENT_BIT;
219 }
220 }
221
222 /* Given that we explicitly state size here, there is no need
223 * for the COPY_CLEAN above, could just copy 16 bytes and be
224 * done. The only problem is when Mesa accesses ctx->Current
225 * directly.
226 */
227 /* Size here is in components - not bytes */
228 if (exec->vtx.attr[i].type != vbo->current[i].Format.User.Type ||
229 (exec->vtx.attr[i].size >> dmul_shift) != vbo->current[i].Format.User.Size) {
230 vbo_set_vertex_format(&vbo->current[i].Format,
231 exec->vtx.attr[i].size >> dmul_shift,
232 exec->vtx.attr[i].type);
233 /* The format changed. We need to update gallium vertex elements.
234 * Material attributes don't need this because they don't have formats.
235 */
236 if (i <= VBO_ATTRIB_EDGEFLAG)
237 ctx->NewState |= _NEW_CURRENT_ATTRIB;
238 }
239 }
240
241 if (color0_changed && ctx->Light.ColorMaterialEnabled) {
242 _mesa_update_color_material(ctx,
243 ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
244 }
245 }
246
247
248 /**
249 * Flush existing data, set new attrib size, replay copied vertices.
250 * This is called when we transition from a small vertex attribute size
251 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
252 * We need to go back over the previous 2-component texcoords and insert
253 * zero and one values.
254 * \param attr VBO_ATTRIB_x vertex attribute value
255 */
256 static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context * exec,GLuint attr,GLuint newSize,GLenum newType)257 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
258 GLuint attr, GLuint newSize, GLenum newType)
259 {
260 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
261 struct vbo_context *vbo = vbo_context(ctx);
262 const GLint lastcount = exec->vtx.vert_count;
263 fi_type *old_attrptr[VBO_ATTRIB_MAX];
264 const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
265 const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
266 const GLuint oldSize = exec->vtx.attr[attr].size;
267 GLuint i;
268
269 assert(attr < VBO_ATTRIB_MAX);
270
271 if (unlikely(!exec->vtx.buffer_ptr)) {
272 /* We should only hit this when use_buffer_objects=true */
273 assert(exec->vtx.bufferobj);
274 vbo_exec_vtx_map(exec);
275 assert(exec->vtx.buffer_ptr);
276 }
277
278 /* Run pipeline on current vertices, copy wrapped vertices
279 * to exec->vtx.copied.
280 */
281 vbo_exec_wrap_buffers(exec);
282
283 if (unlikely(exec->vtx.copied.nr)) {
284 /* We're in the middle of a primitive, keep the old vertex
285 * format around to be able to translate the copied vertices to
286 * the new format.
287 */
288 memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
289 }
290
291 /* Heuristic: Attempt to isolate attributes received outside
292 * begin/end so that they don't bloat the vertices.
293 */
294 if (!_mesa_inside_begin_end(ctx) &&
295 !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
296 vbo_exec_copy_to_current(exec);
297 vbo_reset_all_attr(exec);
298 }
299
300 /* Fix up sizes:
301 */
302 exec->vtx.attr[attr].size = newSize;
303 exec->vtx.attr[attr].active_size = newSize;
304 exec->vtx.attr[attr].type = newType;
305 exec->vtx.vertex_size += newSize - oldSize;
306 exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
307 exec->vtx.max_vert = vbo_compute_max_verts(exec);
308 exec->vtx.vert_count = 0;
309 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
310 exec->vtx.enabled |= BITFIELD64_BIT(attr);
311
312 if (attr != 0) {
313 if (unlikely(oldSize)) {
314 unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;
315
316 /* If there are attribs after the resized attrib... */
317 if (offset + oldSize < old_vtx_size_no_pos) {
318 int size_diff = newSize - oldSize;
319 fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
320 fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
321 fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
322 fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;
323
324 if (size_diff < 0) {
325 /* Decreasing the size: Copy from first to last to move
326 * elements to the left.
327 */
328 fi_type *old_end = old_last + 1;
329 fi_type *old = old_first;
330 fi_type *new = new_first;
331
332 do {
333 *new++ = *old++;
334 } while (old != old_end);
335 } else {
336 /* Increasing the size: Copy from last to first to move
337 * elements to the right.
338 */
339 fi_type *old_end = old_first - 1;
340 fi_type *old = old_last;
341 fi_type *new = new_last;
342
343 do {
344 *new-- = *old--;
345 } while (old != old_end);
346 }
347
348 /* Update pointers to attribs, because we moved them. */
349 GLbitfield64 enabled = exec->vtx.enabled &
350 ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
351 ~BITFIELD64_BIT(attr);
352 while (enabled) {
353 unsigned i = u_bit_scan64(&enabled);
354
355 if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
356 exec->vtx.attrptr[i] += size_diff;
357 }
358 }
359 } else {
360 /* Just have to append the new attribute at the end */
361 exec->vtx.attrptr[attr] = exec->vtx.vertex +
362 exec->vtx.vertex_size_no_pos - newSize;
363 }
364 }
365
366 /* The position is always last. */
367 exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;
368
369 /* Replay stored vertices to translate them
370 * to new format here.
371 *
372 * -- No need to replay - just copy piecewise
373 */
374 if (unlikely(exec->vtx.copied.nr)) {
375 fi_type *data = exec->vtx.copied.buffer;
376 fi_type *dest = exec->vtx.buffer_ptr;
377
378 assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);
379
380 for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
381 GLbitfield64 enabled = exec->vtx.enabled;
382 while (enabled) {
383 const int j = u_bit_scan64(&enabled);
384 GLuint sz = exec->vtx.attr[j].size;
385 GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
386 GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;
387
388 assert(sz);
389
390 if (j == attr) {
391 if (oldSize) {
392 fi_type tmp[4];
393 COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
394 data + old_offset,
395 exec->vtx.attr[j].type);
396 COPY_SZ_4V(dest + new_offset, newSize, tmp);
397 } else {
398 fi_type *current = (fi_type *)vbo->current[j].Ptr;
399 COPY_SZ_4V(dest + new_offset, sz, current);
400 }
401 }
402 else {
403 COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
404 }
405 }
406
407 data += old_vtx_size;
408 dest += exec->vtx.vertex_size;
409 }
410
411 exec->vtx.buffer_ptr = dest;
412 exec->vtx.vert_count += exec->vtx.copied.nr;
413 exec->vtx.copied.nr = 0;
414 }
415 }
416
417
418 /**
419 * This is when a vertex attribute transitions to a different size.
420 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
421 * glTexCoord4f() call. We promote the array from size=2 to size=4.
422 * \param newSize size of new vertex (number of 32-bit words).
423 * \param attr VBO_ATTRIB_x vertex attribute value
424 */
425 static void
vbo_exec_fixup_vertex(struct gl_context * ctx,GLuint attr,GLuint newSize,GLenum newType)426 vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
427 GLuint newSize, GLenum newType)
428 {
429 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
430
431 assert(attr < VBO_ATTRIB_MAX);
432
433 if (newSize > exec->vtx.attr[attr].size ||
434 newType != exec->vtx.attr[attr].type) {
435 /* New size is larger. Need to flush existing vertices and get
436 * an enlarged vertex format.
437 */
438 vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
439 }
440 else if (newSize < exec->vtx.attr[attr].active_size) {
441 GLuint i;
442 const fi_type *id =
443 vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);
444
445 /* New size is smaller - just need to fill in some
446 * zeros. Don't need to flush or wrap.
447 */
448 for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
449 exec->vtx.attrptr[attr][i-1] = id[i-1];
450
451 exec->vtx.attr[attr].active_size = newSize;
452 }
453 }
454
455
456 /**
457 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
458 * It depends on a few things, including whether we're inside or outside
459 * of glBegin/glEnd.
460 */
461 static inline bool
is_vertex_position(const struct gl_context * ctx,GLuint index)462 is_vertex_position(const struct gl_context *ctx, GLuint index)
463 {
464 return (index == 0 &&
465 _mesa_attr_zero_aliases_vertex(ctx) &&
466 _mesa_inside_begin_end(ctx));
467 }
468
469 /* Write a 64-bit value into a 32-bit pointer by preserving endianness. */
470 #if UTIL_ARCH_LITTLE_ENDIAN
471 #define SET_64BIT(dst32, u64) do { \
472 *(dst32)++ = (u64); \
473 *(dst32)++ = (uint64_t)(u64) >> 32; \
474 } while (0)
475 #else
476 #define SET_64BIT(dst32, u64) do { \
477 *(dst32)++ = (uint64_t)(u64) >> 32; \
478 *(dst32)++ = (u64); \
479 } while (0)
480 #endif
481
482
483 /**
484 * This macro is used to implement all the glVertex, glColor, glTexCoord,
485 * glVertexAttrib, etc functions.
486 * \param A VBO_ATTRIB_x attribute index
487 * \param N attribute size (1..4)
488 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
489 * \param C cast type (uint32_t or uint64_t)
490 * \param V0, V1, v2, V3 attribute value
491 */
492 #define ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3) \
493 do { \
494 struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
495 int sz = (sizeof(C) / sizeof(GLfloat)); \
496 \
497 assert(sz == 1 || sz == 2); \
498 /* store a copy of the attribute in exec except for glVertex */ \
499 if ((A) != 0) { \
500 /* Check if attribute size or type is changing. */ \
501 if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
502 exec->vtx.attr[A].type != T)) { \
503 vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
504 } \
505 \
506 C *dest = (C *)exec->vtx.attrptr[A]; \
507 if (N>0) dest[0] = V0; \
508 if (N>1) dest[1] = V1; \
509 if (N>2) dest[2] = V2; \
510 if (N>3) dest[3] = V3; \
511 assert(exec->vtx.attr[A].type == T); \
512 \
513 /* we now have accumulated a per-vertex attribute */ \
514 ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
515 } else { \
516 /* This is a glVertex call */ \
517 int size = exec->vtx.attr[0].size; \
518 \
519 /* Check if attribute size or type is changing. */ \
520 if (unlikely(size < N * sz || \
521 exec->vtx.attr[0].type != T)) { \
522 vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T); \
523 } \
524 \
525 uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
526 uint32_t *src = (uint32_t *)exec->vtx.vertex; \
527 unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
528 \
529 /* Copy over attributes from exec. */ \
530 for (unsigned i = 0; i < vertex_size_no_pos; i++) \
531 *dst++ = *src++; \
532 \
533 /* Store the position, which is always last and can have 32 or */ \
534 /* 64 bits per channel. */ \
535 if (sizeof(C) == 4) { \
536 if (N > 0) *dst++ = V0; \
537 if (N > 1) *dst++ = V1; \
538 if (N > 2) *dst++ = V2; \
539 if (N > 3) *dst++ = V3; \
540 \
541 if (unlikely(N < size)) { \
542 if (N < 2 && size >= 2) *dst++ = V1; \
543 if (N < 3 && size >= 3) *dst++ = V2; \
544 if (N < 4 && size >= 4) *dst++ = V3; \
545 } \
546 } else { \
547 /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
548 /* separately */ \
549 if (N > 0) SET_64BIT(dst, V0); \
550 if (N > 1) SET_64BIT(dst, V1); \
551 if (N > 2) SET_64BIT(dst, V2); \
552 if (N > 3) SET_64BIT(dst, V3); \
553 \
554 if (unlikely(N * 2 < size)) { \
555 if (N < 2 && size >= 4) SET_64BIT(dst, V1); \
556 if (N < 3 && size >= 6) SET_64BIT(dst, V2); \
557 if (N < 4 && size >= 8) SET_64BIT(dst, V3); \
558 } \
559 } \
560 \
561 /* dst now points at the beginning of the next vertex */ \
562 exec->vtx.buffer_ptr = (fi_type*)dst; \
563 \
564 /* Don't set FLUSH_UPDATE_CURRENT because */ \
565 /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
566 \
567 if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
568 vbo_exec_vtx_wrap(exec); \
569 } \
570 } while (0)
571
572 #undef ERROR
573 #define ERROR(err) _mesa_error(ctx, err, __func__)
574 #define TAG(x) _mesa_##x
575 #define SUPPRESS_STATIC
576
577 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
578 ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3)
579
580 #include "vbo_attrib_tmp.h"
581
582
583 /**
584 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
585 * this may be a (partial) no-op.
586 */
587 void GLAPIENTRY
_mesa_Materialfv(GLenum face,GLenum pname,const GLfloat * params)588 _mesa_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
589 {
590 GLbitfield updateMats;
591 GET_CURRENT_CONTEXT(ctx);
592
593 /* This function should be a no-op when it tries to update material
594 * attributes which are currently tracking glColor via glColorMaterial.
595 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
596 * indicating which material attributes can actually be updated below.
597 */
598 if (ctx->Light.ColorMaterialEnabled) {
599 updateMats = ~ctx->Light._ColorMaterialBitmask;
600 }
601 else {
602 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
603 updateMats = ALL_MATERIAL_BITS;
604 }
605
606 if (_mesa_is_desktop_gl_compat(ctx) && face == GL_FRONT) {
607 updateMats &= FRONT_MATERIAL_BITS;
608 }
609 else if (_mesa_is_desktop_gl_compat(ctx) && face == GL_BACK) {
610 updateMats &= BACK_MATERIAL_BITS;
611 }
612 else if (face != GL_FRONT_AND_BACK) {
613 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
614 return;
615 }
616
617 switch (pname) {
618 case GL_EMISSION:
619 if (updateMats & MAT_BIT_FRONT_EMISSION)
620 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
621 if (updateMats & MAT_BIT_BACK_EMISSION)
622 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
623 break;
624 case GL_AMBIENT:
625 if (updateMats & MAT_BIT_FRONT_AMBIENT)
626 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
627 if (updateMats & MAT_BIT_BACK_AMBIENT)
628 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
629 break;
630 case GL_DIFFUSE:
631 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
632 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
633 if (updateMats & MAT_BIT_BACK_DIFFUSE)
634 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
635 break;
636 case GL_SPECULAR:
637 if (updateMats & MAT_BIT_FRONT_SPECULAR)
638 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
639 if (updateMats & MAT_BIT_BACK_SPECULAR)
640 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
641 break;
642 case GL_SHININESS:
643 if (*params < 0 || *params > ctx->Const.MaxShininess) {
644 _mesa_error(ctx, GL_INVALID_VALUE,
645 "glMaterial(invalid shininess: %f out range [0, %f])",
646 *params, ctx->Const.MaxShininess);
647 return;
648 }
649 if (updateMats & MAT_BIT_FRONT_SHININESS)
650 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
651 if (updateMats & MAT_BIT_BACK_SHININESS)
652 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
653 break;
654 case GL_COLOR_INDEXES:
655 if (ctx->API != API_OPENGL_COMPAT) {
656 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
657 return;
658 }
659 if (updateMats & MAT_BIT_FRONT_INDEXES)
660 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
661 if (updateMats & MAT_BIT_BACK_INDEXES)
662 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
663 break;
664 case GL_AMBIENT_AND_DIFFUSE:
665 if (updateMats & MAT_BIT_FRONT_AMBIENT)
666 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
667 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
668 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
669 if (updateMats & MAT_BIT_BACK_AMBIENT)
670 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
671 if (updateMats & MAT_BIT_BACK_DIFFUSE)
672 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
673 break;
674 default:
675 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
676 return;
677 }
678 }
679
680
681 /**
682 * Flush (draw) vertices.
683 *
684 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
685 */
686 static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context * exec,unsigned flags)687 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
688 {
689 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
690
691 if (flags & FLUSH_STORED_VERTICES) {
692 if (exec->vtx.vert_count) {
693 vbo_exec_vtx_flush(exec);
694 }
695
696 if (exec->vtx.vertex_size) {
697 vbo_exec_copy_to_current(exec);
698 vbo_reset_all_attr(exec);
699 }
700
701 /* All done. */
702 ctx->Driver.NeedFlush = 0;
703 } else {
704 assert(flags == FLUSH_UPDATE_CURRENT);
705
706 /* Note that the vertex size is unchanged.
707 * (vbo_reset_all_attr isn't called)
708 */
709 vbo_exec_copy_to_current(exec);
710
711 /* Only FLUSH_UPDATE_CURRENT is done. */
712 ctx->Driver.NeedFlush = ~FLUSH_UPDATE_CURRENT;
713 }
714 }
715
716
/**
 * Called via glEvalCoord1f: evaluate enabled 1-D evaluator maps at u
 * and emit the resulting vertex.
 */
void GLAPIENTRY
_mesa_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by an active map has the
       * size the map will produce.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex so the evaluator's writes don't clobber
    * the application's current attribute values...
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* ...and restore it afterwards. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
743
744
/**
 * Called via glEvalCoord2f: evaluate enabled 2-D evaluator maps at (u, v)
 * and emit the resulting vertex.
 */
void GLAPIENTRY
_mesa_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by an active map has the
       * size the map will produce.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates a 3-component normal. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex so the evaluator's writes don't clobber
    * the application's current attribute values...
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* ...and restore it afterwards. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
775
776
777 void GLAPIENTRY
_mesa_EvalCoord1fv(const GLfloat * u)778 _mesa_EvalCoord1fv(const GLfloat *u)
779 {
780 _mesa_EvalCoord1f(u[0]);
781 }
782
783
784 void GLAPIENTRY
_mesa_EvalCoord2fv(const GLfloat * u)785 _mesa_EvalCoord2fv(const GLfloat *u)
786 {
787 _mesa_EvalCoord2f(u[0], u[1]);
788 }
789
790
791 void GLAPIENTRY
_mesa_EvalPoint1(GLint i)792 _mesa_EvalPoint1(GLint i)
793 {
794 GET_CURRENT_CONTEXT(ctx);
795 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
796 (GLfloat) ctx->Eval.MapGrid1un);
797 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
798
799 _mesa_EvalCoord1f(u);
800 }
801
802
803 void GLAPIENTRY
_mesa_EvalPoint2(GLint i,GLint j)804 _mesa_EvalPoint2(GLint i, GLint j)
805 {
806 GET_CURRENT_CONTEXT(ctx);
807 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
808 (GLfloat) ctx->Eval.MapGrid2un);
809 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
810 (GLfloat) ctx->Eval.MapGrid2vn);
811 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
812 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
813
814 _mesa_EvalCoord2f(u, v);
815 }
816
817
818 /**
819 * Called via glBegin.
820 */
821 void GLAPIENTRY
_mesa_Begin(GLenum mode)822 _mesa_Begin(GLenum mode)
823 {
824 GET_CURRENT_CONTEXT(ctx);
825 struct vbo_context *vbo = vbo_context(ctx);
826 struct vbo_exec_context *exec = &vbo->exec;
827 int i;
828
829 if (_mesa_inside_begin_end(ctx)) {
830 _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
831 return;
832 }
833
834 if (ctx->NewState)
835 _mesa_update_state(ctx);
836
837 GLenum error = _mesa_valid_prim_mode(ctx, mode);
838 if (error != GL_NO_ERROR) {
839 _mesa_error(ctx, error, "glBegin");
840 return;
841 }
842
843 /* Heuristic: attempt to isolate attributes occurring outside
844 * begin/end pairs.
845 *
846 * Use FLUSH_STORED_VERTICES, because it updates current attribs and
847 * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
848 */
849 if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
850 vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);
851
852 i = exec->vtx.prim_count++;
853 exec->vtx.mode[i] = mode;
854 exec->vtx.draw[i].start = exec->vtx.vert_count;
855 exec->vtx.markers[i].begin = 1;
856
857 ctx->Driver.CurrentExecPrimitive = mode;
858
859 ctx->Dispatch.Exec = _mesa_hw_select_enabled(ctx) ?
860 ctx->Dispatch.HWSelectModeBeginEnd : ctx->Dispatch.BeginEnd;
861
862 /* We may have been called from a display list, in which case we should
863 * leave dlist.c's dispatch table in place.
864 */
865 if (ctx->GLThread.enabled) {
866 if (ctx->Dispatch.Current == ctx->Dispatch.OutsideBeginEnd)
867 ctx->Dispatch.Current = ctx->Dispatch.Exec;
868 } else if (ctx->GLApi == ctx->Dispatch.OutsideBeginEnd) {
869 ctx->GLApi = ctx->Dispatch.Current = ctx->Dispatch.Exec;
870 _glapi_set_dispatch(ctx->GLApi);
871 } else {
872 assert(ctx->GLApi == ctx->Dispatch.Save);
873 }
874 }
875
876
877 /**
878 * Try to merge / concatenate the two most recent VBO primitives.
879 */
880 static void
try_vbo_merge(struct vbo_exec_context * exec)881 try_vbo_merge(struct vbo_exec_context *exec)
882 {
883 unsigned cur = exec->vtx.prim_count - 1;
884
885 assert(exec->vtx.prim_count >= 1);
886
887 vbo_try_prim_conversion(&exec->vtx.mode[cur], &exec->vtx.draw[cur].count);
888
889 if (exec->vtx.prim_count >= 2) {
890 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
891 unsigned prev = cur - 1;
892
893 if (vbo_merge_draws(ctx, false,
894 exec->vtx.mode[prev],
895 exec->vtx.mode[cur],
896 exec->vtx.draw[prev].start,
897 exec->vtx.draw[cur].start,
898 &exec->vtx.draw[prev].count,
899 exec->vtx.draw[cur].count,
900 0, 0,
901 &exec->vtx.markers[prev].end,
902 exec->vtx.markers[cur].begin,
903 exec->vtx.markers[cur].end))
904 exec->vtx.prim_count--; /* drop the last primitive */
905 }
906 }
907
908
909 /**
910 * Called via glEnd.
911 */
912 void GLAPIENTRY
_mesa_End(void)913 _mesa_End(void)
914 {
915 GET_CURRENT_CONTEXT(ctx);
916 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
917
918 if (!_mesa_inside_begin_end(ctx)) {
919 _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
920 return;
921 }
922
923 ctx->Dispatch.Exec = ctx->Dispatch.OutsideBeginEnd;
924
925 if (ctx->GLThread.enabled) {
926 if (ctx->Dispatch.Current == ctx->Dispatch.BeginEnd ||
927 ctx->Dispatch.Current == ctx->Dispatch.HWSelectModeBeginEnd) {
928 ctx->Dispatch.Current = ctx->Dispatch.Exec;
929 }
930 } else if (ctx->GLApi == ctx->Dispatch.BeginEnd ||
931 ctx->GLApi == ctx->Dispatch.HWSelectModeBeginEnd) {
932 ctx->GLApi = ctx->Dispatch.Current = ctx->Dispatch.Exec;
933 _glapi_set_dispatch(ctx->GLApi);
934 }
935
936 if (exec->vtx.prim_count > 0) {
937 /* close off current primitive */
938 unsigned last = exec->vtx.prim_count - 1;
939 struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
940 unsigned count = exec->vtx.vert_count - last_draw->start;
941
942 last_draw->count = count;
943 exec->vtx.markers[last].end = 1;
944
945 if (count) {
946 /* mark result buffer used */
947 if (_mesa_hw_select_enabled(ctx))
948 ctx->Select.ResultUsed = GL_TRUE;
949
950 ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
951 }
952
953 /* Special handling for GL_LINE_LOOP */
954 bool driver_supports_lineloop =
955 ctx->Const.DriverSupportedPrimMask & BITFIELD_BIT(MESA_PRIM_LINE_LOOP);
956 if (exec->vtx.mode[last] == GL_LINE_LOOP &&
957 (exec->vtx.markers[last].begin == 0 || !driver_supports_lineloop)) {
958 /* We're finishing drawing a line loop. Append 0th vertex onto
959 * end of vertex buffer so we can draw it as a line strip.
960 */
961 const fi_type *src = exec->vtx.buffer_map +
962 last_draw->start * exec->vtx.vertex_size;
963 fi_type *dst = exec->vtx.buffer_map +
964 exec->vtx.vert_count * exec->vtx.vertex_size;
965
966 /* copy 0th vertex to end of buffer */
967 memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));
968
969 if (exec->vtx.markers[last].begin == 0)
970 last_draw->start++; /* skip vertex0 */
971
972 /* note that the count stays unchanged */
973 exec->vtx.mode[last] = GL_LINE_STRIP;
974
975 /* Increment the vertex count so the next primitive doesn't
976 * overwrite the last vertex which we just added.
977 */
978 exec->vtx.vert_count++;
979 exec->vtx.buffer_ptr += exec->vtx.vertex_size;
980
981 if (!driver_supports_lineloop)
982 last_draw->count++;
983 }
984
985 try_vbo_merge(exec);
986 }
987
988 ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;
989
990 if (exec->vtx.prim_count == VBO_MAX_PRIM)
991 vbo_exec_vtx_flush(exec);
992
993 if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
994 _mesa_flush(ctx);
995 }
996 }
997
998
999 /**
1000 * Called via glPrimitiveRestartNV()
1001 */
1002 void GLAPIENTRY
_mesa_PrimitiveRestartNV(void)1003 _mesa_PrimitiveRestartNV(void)
1004 {
1005 GLenum curPrim;
1006 GET_CURRENT_CONTEXT(ctx);
1007
1008 curPrim = ctx->Driver.CurrentExecPrimitive;
1009
1010 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
1011 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
1012 }
1013 else {
1014 _mesa_End();
1015 _mesa_Begin(curPrim);
1016 }
1017 }
1018
1019
1020 /**
1021 * A special version of glVertexAttrib4f that does not treat index 0 as
1022 * VBO_ATTRIB_POS.
1023 */
1024 static void
VertexAttrib4f_nopos(GLuint index,GLfloat x,GLfloat y,GLfloat z,GLfloat w)1025 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1026 {
1027 GET_CURRENT_CONTEXT(ctx);
1028 if (index < ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs)
1029 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1030 else
1031 ERROR(GL_INVALID_VALUE);
1032 }
1033
1034 static void GLAPIENTRY
_es_VertexAttrib4fARB(GLuint index,GLfloat x,GLfloat y,GLfloat z,GLfloat w)1035 _es_VertexAttrib4fARB(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1036 {
1037 VertexAttrib4f_nopos(index, x, y, z, w);
1038 }
1039
1040
1041 static void GLAPIENTRY
_es_VertexAttrib1fARB(GLuint indx,GLfloat x)1042 _es_VertexAttrib1fARB(GLuint indx, GLfloat x)
1043 {
1044 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1045 }
1046
1047
1048 static void GLAPIENTRY
_es_VertexAttrib1fvARB(GLuint indx,const GLfloat * values)1049 _es_VertexAttrib1fvARB(GLuint indx, const GLfloat* values)
1050 {
1051 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1052 }
1053
1054
1055 static void GLAPIENTRY
_es_VertexAttrib2fARB(GLuint indx,GLfloat x,GLfloat y)1056 _es_VertexAttrib2fARB(GLuint indx, GLfloat x, GLfloat y)
1057 {
1058 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1059 }
1060
1061
1062 static void GLAPIENTRY
_es_VertexAttrib2fvARB(GLuint indx,const GLfloat * values)1063 _es_VertexAttrib2fvARB(GLuint indx, const GLfloat* values)
1064 {
1065 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1066 }
1067
1068
1069 static void GLAPIENTRY
_es_VertexAttrib3fARB(GLuint indx,GLfloat x,GLfloat y,GLfloat z)1070 _es_VertexAttrib3fARB(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1071 {
1072 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1073 }
1074
1075
1076 static void GLAPIENTRY
_es_VertexAttrib3fvARB(GLuint indx,const GLfloat * values)1077 _es_VertexAttrib3fvARB(GLuint indx, const GLfloat* values)
1078 {
1079 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1080 }
1081
1082
1083 static void GLAPIENTRY
_es_VertexAttrib4fvARB(GLuint indx,const GLfloat * values)1084 _es_VertexAttrib4fvARB(GLuint indx, const GLfloat* values)
1085 {
1086 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1087 }
1088
1089
/**
 * Install the immediate-mode (glBegin/glEnd-related) entrypoints into the
 * context's dispatch tables.  The actual SetProc calls live in the
 * generated header "api_beginend_init.h", which expands the NAME* macros
 * below against whatever `tab` currently points to.
 */
void
vbo_init_dispatch_begin_end(struct gl_context *ctx)
{
#define NAME_AE(x) _mesa_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) _mesa_##x
#define NAME_ES(x) _es_##x

   /* First populate the table used outside glBegin/glEnd ... */
   struct _glapi_table *tab = ctx->Dispatch.OutsideBeginEnd;
#include "api_beginend_init.h"

   /* ... then, if the context keeps a separate begin/end table, populate
    * it with the same entrypoints by re-including the generated header.
    */
   if (ctx->Dispatch.BeginEnd) {
      tab = ctx->Dispatch.BeginEnd;
#include "api_beginend_init.h"
   }
}
1106
1107
1108 static void
vbo_reset_all_attr(struct vbo_exec_context * exec)1109 vbo_reset_all_attr(struct vbo_exec_context *exec)
1110 {
1111 while (exec->vtx.enabled) {
1112 const int i = u_bit_scan64(&exec->vtx.enabled);
1113
1114 /* Reset the vertex attribute by setting its size to zero. */
1115 exec->vtx.attr[i].size = 0;
1116 exec->vtx.attr[i].type = GL_FLOAT;
1117 exec->vtx.attr[i].active_size = 0;
1118 exec->vtx.attrptr[i] = NULL;
1119 }
1120
1121 exec->vtx.vertex_size = 0;
1122 }
1123
1124
/**
 * One-time initialization of the immediate-mode vertex execution state.
 */
void
vbo_exec_vtx_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   /* Buffer object backing immediate-mode vertex data; the magic name
    * distinguishes it from user-created buffer objects.
    */
   exec->vtx.bufferobj = _mesa_bufferobj_alloc(ctx, IMM_BUFFER_NAME);

   /* Mark every attribute enabled so vbo_reset_all_attr() clears them all. */
   exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
   vbo_reset_all_attr(exec);

   /* Non-instanced drawing by default; max_index unrestricted. */
   exec->vtx.info.instance_count = 1;
   exec->vtx.info.max_index = ~0;
}
1138
1139
/**
 * Free the immediate-mode vertex buffer state: release any malloc'd
 * fallback mapping, unmap the VBO if still mapped, and drop the buffer
 * object reference.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(!exec->vtx.bufferobj ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (!exec->vtx.bufferobj) {
         /* malloc'd fallback buffer (no VBO) — free it directly */
         align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (exec->vtx.bufferobj &&
       _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      _mesa_bufferobj_unmap(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   /* Drops our reference; frees the buffer object if it was the last one. */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
1166
1167
1168 /**
1169 * If inside glBegin()/glEnd(), it should assert(0). Otherwise, if
1170 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
1171 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
1172 * __struct gl_contextRec::Current and gl_light_attrib::Material
1173 *
1174 * Note that the default T&L engine never clears the
1175 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1176 *
1177 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1178 */
1179 void
vbo_exec_FlushVertices(struct gl_context * ctx,GLuint flags)1180 vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
1181 {
1182 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1183
1184 #ifndef NDEBUG
1185 /* debug check: make sure we don't get called recursively */
1186 exec->flush_call_depth++;
1187 assert(exec->flush_call_depth == 1);
1188 #endif
1189
1190 if (_mesa_inside_begin_end(ctx)) {
1191 /* We've had glBegin but not glEnd! */
1192 #ifndef NDEBUG
1193 exec->flush_call_depth--;
1194 assert(exec->flush_call_depth == 0);
1195 #endif
1196 return;
1197 }
1198
1199 /* Flush (draw). */
1200 vbo_exec_FlushVertices_internal(exec, flags);
1201
1202 #ifndef NDEBUG
1203 exec->flush_call_depth--;
1204 assert(exec->flush_call_depth == 0);
1205 #endif
1206 }
1207
1208
1209 void GLAPIENTRY
_es_Color4f(GLfloat r,GLfloat g,GLfloat b,GLfloat a)1210 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1211 {
1212 _mesa_Color4f(r, g, b, a);
1213 }
1214
1215
1216 void GLAPIENTRY
_es_Normal3f(GLfloat x,GLfloat y,GLfloat z)1217 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1218 {
1219 _mesa_Normal3f(x, y, z);
1220 }
1221
1222
1223 void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target,GLfloat s,GLfloat t,GLfloat r,GLfloat q)1224 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1225 {
1226 _mesa_MultiTexCoord4fARB(target, s, t, r, q);
1227 }
1228
1229
1230 void GLAPIENTRY
_es_Materialfv(GLenum face,GLenum pname,const GLfloat * params)1231 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1232 {
1233 _mesa_Materialfv(face, pname, params);
1234 }
1235
1236
1237 void GLAPIENTRY
_es_Materialf(GLenum face,GLenum pname,GLfloat param)1238 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1239 {
1240 GLfloat p[4];
1241 p[0] = param;
1242 p[1] = p[2] = p[3] = 0.0F;
1243 _mesa_Materialfv(face, pname, p);
1244 }
1245
#undef TAG
#undef SUPPRESS_STATIC
/* Re-instantiate the attribute entrypoints from vbo_attrib_tmp.h under a
 * _hw_select_ prefix so they can be installed in a dedicated dispatch
 * table for HW select mode.
 */
#define TAG(x) _hw_select_##x
/* Filter out non-vertex API entries. */
#define HW_SELECT_MODE

#undef ATTR_UNION
/* Wrap the base attribute setter: when attribute 0 is written (which
 * presumably completes/starts a vertex — TODO confirm against
 * vbo_attrib_tmp.h), also store the current select-result offset into
 * its dedicated attribute slot.
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
   do { \
      if ((A) == 0) { \
         ATTR_UNION_BASE(VBO_ATTRIB_SELECT_RESULT_OFFSET, 1, GL_UNSIGNED_INT, uint32_t, \
                         ctx->Select.ResultOffset, 0, 0, 0); \
      } \
      ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3); \
   } while (0)

#include "vbo_attrib_tmp.h"
1263
/**
 * Build the glBegin/glEnd dispatch table used while HW select mode is
 * active: start from a copy of the regular begin/end table, then override
 * entries with the _hw_select_* variants via the generated header.
 */
void
vbo_init_dispatch_hw_select_begin_end(struct gl_context *ctx)
{
   /* NOTE(review): copies MAX2 of the two size sources — presumably both
    * tables are allocated with at least this many slots; confirm against
    * the dispatch-table allocation code.
    */
   int numEntries = MAX2(_gloffset_COUNT, _glapi_get_dispatch_table_size());
   memcpy(ctx->Dispatch.HWSelectModeBeginEnd, ctx->Dispatch.BeginEnd, numEntries * sizeof(_glapi_proc));

#undef NAME
#define NAME(x) _hw_select_##x
   /* api_hw_select_init.h installs the _hw_select_* entrypoints into `tab`. */
   struct _glapi_table *tab = ctx->Dispatch.HWSelectModeBeginEnd;
#include "api_hw_select_init.h"
}
1275