
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * This converts the VBO's vertex attribute/array information into
 * Gallium vertex state and binds it.
 *
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 *   Marek Olšák <maraeo@gmail.com>
 */

#include "st_context.h"
#include "st_atom.h"
#include "st_cb_bufferobjects.h"
#include "st_draw.h"
#include "st_program.h"

#include "cso_cache/cso_context.h"
#include "util/u_math.h"
#include "util/u_upload_mgr.h"
#include "main/bufferobj.h"
#include "main/glformats.h"

/* vertex_formats[gltype - GL_BYTE][integer*2 + normalized][size - 1] */
static const uint16_t vertex_formats[][4][4] = {
   { /* GL_BYTE */
      {
         PIPE_FORMAT_R8_SSCALED,
         PIPE_FORMAT_R8G8_SSCALED,
         PIPE_FORMAT_R8G8B8_SSCALED,
         PIPE_FORMAT_R8G8B8A8_SSCALED
      },
      {
         PIPE_FORMAT_R8_SNORM,
         PIPE_FORMAT_R8G8_SNORM,
         PIPE_FORMAT_R8G8B8_SNORM,
         PIPE_FORMAT_R8G8B8A8_SNORM
      },
      {
         PIPE_FORMAT_R8_SINT,
         PIPE_FORMAT_R8G8_SINT,
         PIPE_FORMAT_R8G8B8_SINT,
         PIPE_FORMAT_R8G8B8A8_SINT
      },
   },
   { /* GL_UNSIGNED_BYTE */
      {
         PIPE_FORMAT_R8_USCALED,
         PIPE_FORMAT_R8G8_USCALED,
         PIPE_FORMAT_R8G8B8_USCALED,
         PIPE_FORMAT_R8G8B8A8_USCALED
      },
      {
         PIPE_FORMAT_R8_UNORM,
         PIPE_FORMAT_R8G8_UNORM,
         PIPE_FORMAT_R8G8B8_UNORM,
         PIPE_FORMAT_R8G8B8A8_UNORM
      },
      {
         PIPE_FORMAT_R8_UINT,
         PIPE_FORMAT_R8G8_UINT,
         PIPE_FORMAT_R8G8B8_UINT,
         PIPE_FORMAT_R8G8B8A8_UINT
      },
   },
   { /* GL_SHORT */
      {
         PIPE_FORMAT_R16_SSCALED,
         PIPE_FORMAT_R16G16_SSCALED,
         PIPE_FORMAT_R16G16B16_SSCALED,
         PIPE_FORMAT_R16G16B16A16_SSCALED
      },
      {
         PIPE_FORMAT_R16_SNORM,
         PIPE_FORMAT_R16G16_SNORM,
         PIPE_FORMAT_R16G16B16_SNORM,
         PIPE_FORMAT_R16G16B16A16_SNORM
      },
      {
         PIPE_FORMAT_R16_SINT,
         PIPE_FORMAT_R16G16_SINT,
         PIPE_FORMAT_R16G16B16_SINT,
         PIPE_FORMAT_R16G16B16A16_SINT
      },
   },
   { /* GL_UNSIGNED_SHORT */
      {
         PIPE_FORMAT_R16_USCALED,
         PIPE_FORMAT_R16G16_USCALED,
         PIPE_FORMAT_R16G16B16_USCALED,
         PIPE_FORMAT_R16G16B16A16_USCALED
      },
      {
         PIPE_FORMAT_R16_UNORM,
         PIPE_FORMAT_R16G16_UNORM,
         PIPE_FORMAT_R16G16B16_UNORM,
         PIPE_FORMAT_R16G16B16A16_UNORM
      },
      {
         PIPE_FORMAT_R16_UINT,
         PIPE_FORMAT_R16G16_UINT,
         PIPE_FORMAT_R16G16B16_UINT,
         PIPE_FORMAT_R16G16B16A16_UINT
      },
   },
   { /* GL_INT */
      {
         PIPE_FORMAT_R32_SSCALED,
         PIPE_FORMAT_R32G32_SSCALED,
         PIPE_FORMAT_R32G32B32_SSCALED,
         PIPE_FORMAT_R32G32B32A32_SSCALED
      },
      {
         PIPE_FORMAT_R32_SNORM,
         PIPE_FORMAT_R32G32_SNORM,
         PIPE_FORMAT_R32G32B32_SNORM,
         PIPE_FORMAT_R32G32B32A32_SNORM
      },
      {
         PIPE_FORMAT_R32_SINT,
         PIPE_FORMAT_R32G32_SINT,
         PIPE_FORMAT_R32G32B32_SINT,
         PIPE_FORMAT_R32G32B32A32_SINT
      },
   },
   { /* GL_UNSIGNED_INT */
      {
         PIPE_FORMAT_R32_USCALED,
         PIPE_FORMAT_R32G32_USCALED,
         PIPE_FORMAT_R32G32B32_USCALED,
         PIPE_FORMAT_R32G32B32A32_USCALED
      },
      {
         PIPE_FORMAT_R32_UNORM,
         PIPE_FORMAT_R32G32_UNORM,
         PIPE_FORMAT_R32G32B32_UNORM,
         PIPE_FORMAT_R32G32B32A32_UNORM
      },
      {
         PIPE_FORMAT_R32_UINT,
         PIPE_FORMAT_R32G32_UINT,
         PIPE_FORMAT_R32G32B32_UINT,
         PIPE_FORMAT_R32G32B32A32_UINT
      },
   },
   { /* GL_FLOAT */
      {
         PIPE_FORMAT_R32_FLOAT,
         PIPE_FORMAT_R32G32_FLOAT,
         PIPE_FORMAT_R32G32B32_FLOAT,
         PIPE_FORMAT_R32G32B32A32_FLOAT
      },
      {
         PIPE_FORMAT_R32_FLOAT,
         PIPE_FORMAT_R32G32_FLOAT,
         PIPE_FORMAT_R32G32B32_FLOAT,
         PIPE_FORMAT_R32G32B32A32_FLOAT
      },
   },
   {{0}}, /* GL_2_BYTES */
   {{0}}, /* GL_3_BYTES */
   {{0}}, /* GL_4_BYTES */
   { /* GL_DOUBLE */
      {
         PIPE_FORMAT_R64_FLOAT,
         PIPE_FORMAT_R64G64_FLOAT,
         PIPE_FORMAT_R64G64B64_FLOAT,
         PIPE_FORMAT_R64G64B64A64_FLOAT
      },
      {
         PIPE_FORMAT_R64_FLOAT,
         PIPE_FORMAT_R64G64_FLOAT,
         PIPE_FORMAT_R64G64B64_FLOAT,
         PIPE_FORMAT_R64G64B64A64_FLOAT
      },
   },
   { /* GL_HALF_FLOAT */
      {
         PIPE_FORMAT_R16_FLOAT,
         PIPE_FORMAT_R16G16_FLOAT,
         PIPE_FORMAT_R16G16B16_FLOAT,
         PIPE_FORMAT_R16G16B16A16_FLOAT
      },
      {
         PIPE_FORMAT_R16_FLOAT,
         PIPE_FORMAT_R16G16_FLOAT,
         PIPE_FORMAT_R16G16B16_FLOAT,
         PIPE_FORMAT_R16G16B16A16_FLOAT
      },
   },
   { /* GL_FIXED */
      {
         PIPE_FORMAT_R32_FIXED,
         PIPE_FORMAT_R32G32_FIXED,
         PIPE_FORMAT_R32G32B32_FIXED,
         PIPE_FORMAT_R32G32B32A32_FIXED
      },
      {
         PIPE_FORMAT_R32_FIXED,
         PIPE_FORMAT_R32G32_FIXED,
         PIPE_FORMAT_R32G32B32_FIXED,
         PIPE_FORMAT_R32G32B32A32_FIXED
      },
   },
};
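
/*
 * Worked examples of the lookup above (for illustration only; indices follow
 * the comment above the table, with GL_BYTE as the first type row):
 *   GL_FLOAT, size 3, plain        -> [GL_FLOAT - GL_BYTE][0][2] = PIPE_FORMAT_R32G32B32_FLOAT
 *   GL_UNSIGNED_BYTE, size 4, norm -> [1][1][3]                  = PIPE_FORMAT_R8G8B8A8_UNORM
 * Types with no distinct normalized encoding (FLOAT, HALF_FLOAT, DOUBLE,
 * FIXED) simply repeat their formats in the normalized row.
 */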


/**
 * Return a PIPE_FORMAT_x for the given GL datatype and size.
 */
enum pipe_format
st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
                      GLboolean normalized, GLboolean integer)
{
   unsigned index;

   assert(size >= 1 && size <= 4);
   assert(format == GL_RGBA || format == GL_BGRA);

   switch (type) {
   case GL_HALF_FLOAT_OES:
      type = GL_HALF_FLOAT;
      break;

   case GL_INT_2_10_10_10_REV:
      assert(size == 4 && !integer);

      if (format == GL_BGRA) {
         if (normalized)
            return PIPE_FORMAT_B10G10R10A2_SNORM;
         else
            return PIPE_FORMAT_B10G10R10A2_SSCALED;
      } else {
         if (normalized)
            return PIPE_FORMAT_R10G10B10A2_SNORM;
         else
            return PIPE_FORMAT_R10G10B10A2_SSCALED;
      }
      break;

   case GL_UNSIGNED_INT_2_10_10_10_REV:
      assert(size == 4 && !integer);

      if (format == GL_BGRA) {
         if (normalized)
            return PIPE_FORMAT_B10G10R10A2_UNORM;
         else
            return PIPE_FORMAT_B10G10R10A2_USCALED;
      } else {
         if (normalized)
            return PIPE_FORMAT_R10G10B10A2_UNORM;
         else
            return PIPE_FORMAT_R10G10B10A2_USCALED;
      }
      break;

   case GL_UNSIGNED_INT_10F_11F_11F_REV:
      assert(size == 3 && !integer && format == GL_RGBA);
      return PIPE_FORMAT_R11G11B10_FLOAT;

   case GL_UNSIGNED_BYTE:
      if (format == GL_BGRA) {
         /* this is an odd-ball case */
         assert(normalized);
         return PIPE_FORMAT_B8G8R8A8_UNORM;
      }
      break;
   }

   index = integer*2 + normalized;
   assert(index <= 2);
   assert(type >= GL_BYTE && type <= GL_FIXED);
   return vertex_formats[type - GL_BYTE][index][size-1];
}

static const struct gl_vertex_array *
get_client_array(const struct gl_vertex_array **arrays,
                 unsigned mesaAttr)
{
   /* st_program uses 0xffffffff to denote a double placeholder attribute */
   if (mesaAttr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
      return NULL;
   return arrays[mesaAttr];
}

/**
 * Examine the active arrays to determine if we have interleaved
 * vertex arrays all living in one VBO, or all living in user space.
 */
static GLboolean
is_interleaved_arrays(const struct st_vertex_program *vp,
                      const struct gl_vertex_array **arrays,
                      unsigned num_inputs)
{
   GLuint attr;
   const struct gl_buffer_object *firstBufObj = NULL;
   GLint firstStride = -1;
   const GLubyte *firstPtr = NULL;
   GLboolean userSpaceBuffer = GL_FALSE;

   for (attr = 0; attr < num_inputs; attr++) {
      const struct gl_vertex_array *array;
      const struct gl_buffer_object *bufObj;
      GLsizei stride;

      array = get_client_array(arrays, vp->index_to_input[attr]);
      if (!array)
         continue;

      stride = array->StrideB; /* in bytes */

      /* To keep things simple, don't allow interleaved zero-stride attribs. */
      if (stride == 0)
         return false;

      bufObj = array->BufferObj;
      if (attr == 0) {
         /* save info about the first array */
         firstStride = stride;
         firstPtr = array->Ptr;
         firstBufObj = bufObj;
         userSpaceBuffer = !bufObj || !bufObj->Name;
      }
      else {
         /* check if other arrays interleave with the first, in same buffer */
         if (stride != firstStride)
            return GL_FALSE; /* strides don't match */

         if (bufObj != firstBufObj)
            return GL_FALSE; /* arrays in different VBOs */

         if (llabs(array->Ptr - firstPtr) > firstStride)
            return GL_FALSE; /* arrays start too far apart */

         if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
            return GL_FALSE; /* mix of VBO and user-space arrays */
      }
   }

   return GL_TRUE;
}
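
/*
 * For instance, a hypothetical layout with three arrays sharing one VBO at
 * byte offsets 0, 12 and 24, all with StrideB == 32 (vec3 position, vec3
 * normal, vec2 texcoord packed per vertex), passes every check in
 * is_interleaved_arrays(); any mismatch in stride, buffer object or
 * VBO/user-space mix falls back to the non-interleaved path below.
 */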

static void init_velement(struct pipe_vertex_element *velement,
                          int src_offset, int format,
                          int instance_divisor, int vbo_index)
{
   velement->src_offset = src_offset;
   velement->src_format = format;
   velement->instance_divisor = instance_divisor;
   velement->vertex_buffer_index = vbo_index;
   assert(velement->src_format);
}

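/*
 * Write one or two vertex elements for a single GL attribute, advancing
 * *attr_idx past whatever was written.  Double-precision attributes are
 * lowered to 32-bit uint formats: the first one or two doubles become an
 * R32G32_UINT or R32G32B32A32_UINT element, and if the vertex program
 * reserved a placeholder input, the remaining doubles (or a dummy value)
 * go into a second element at src_offset + 16 bytes.
 */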
static void init_velement_lowered(const struct st_vertex_program *vp,
                                  struct pipe_vertex_element *velements,
                                  int src_offset, int format,
                                  int instance_divisor, int vbo_index,
                                  int nr_components, GLboolean doubles,
                                  GLuint *attr_idx)
{
   int idx = *attr_idx;
   if (doubles) {
      int lower_format;

      if (nr_components < 2)
         lower_format = PIPE_FORMAT_R32G32_UINT;
      else
         lower_format = PIPE_FORMAT_R32G32B32A32_UINT;

      init_velement(&velements[idx], src_offset,
                    lower_format, instance_divisor, vbo_index);
      idx++;

      if (idx < vp->num_inputs &&
          vp->index_to_input[idx] == ST_DOUBLE_ATTRIB_PLACEHOLDER) {
         if (nr_components >= 3) {
            if (nr_components == 3)
               lower_format = PIPE_FORMAT_R32G32_UINT;
            else
               lower_format = PIPE_FORMAT_R32G32B32A32_UINT;

            init_velement(&velements[idx], src_offset + 4 * sizeof(float),
                          lower_format, instance_divisor, vbo_index);
         } else {
            /* The values here are undefined. Fill in some conservative
             * dummy values.
             */
            init_velement(&velements[idx], src_offset, PIPE_FORMAT_R32G32_UINT,
                          instance_divisor, vbo_index);
         }

         idx++;
      }
   } else {
      init_velement(&velements[idx], src_offset,
                    format, instance_divisor, vbo_index);
      idx++;
   }
   *attr_idx = idx;
}

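/*
 * Bind the given vertex buffers and vertex elements through the CSO context,
 * unbinding any trailing buffers left over from the previous draw (tracked
 * in st->last_num_vbuffers).
 */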
static void
set_vertex_attribs(struct st_context *st,
                   struct pipe_vertex_buffer *vbuffers,
                   unsigned num_vbuffers,
                   struct pipe_vertex_element *velements,
                   unsigned num_velements)
{
   struct cso_context *cso = st->cso_context;

   cso_set_vertex_buffers(cso, 0, num_vbuffers, vbuffers);
   if (st->last_num_vbuffers > num_vbuffers) {
      /* Unbind remaining buffers, if any. */
      cso_set_vertex_buffers(cso, num_vbuffers,
                             st->last_num_vbuffers - num_vbuffers, NULL);
   }
   st->last_num_vbuffers = num_vbuffers;
   cso_set_vertex_elements(cso, num_velements, velements);
}

/**
 * Set up for drawing interleaved arrays that all live in one VBO
 * or all live in user space.  The resulting vertex buffer and vertex
 * element state is bound directly via set_vertex_attribs(); nothing is
 * returned to the caller.
 */
static void
setup_interleaved_attribs(struct st_context *st,
                          const struct st_vertex_program *vp,
                          const struct gl_vertex_array **arrays,
                          unsigned num_inputs)
{
   struct pipe_vertex_buffer vbuffer;
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS] = {{0}};
   GLuint attr;
   const GLubyte *low_addr = NULL;
   GLboolean usingVBO;      /* all arrays in a VBO? */
   struct gl_buffer_object *bufobj;
   GLsizei stride;

   /* Find the lowest address of the arrays we're drawing,
    * Init bufobj and stride.
    */
   if (num_inputs) {
      const struct gl_vertex_array *array;

      array = get_client_array(arrays, vp->index_to_input[0]);
      assert(array);

      /* Since we're doing interleaved arrays, we know there'll be at most
       * one buffer object and the stride will be the same for all arrays.
       * Grab them now.
       */
      bufobj = array->BufferObj;
      stride = array->StrideB;

      low_addr = arrays[vp->index_to_input[0]]->Ptr;

      for (attr = 1; attr < num_inputs; attr++) {
         const GLubyte *start;
         array = get_client_array(arrays, vp->index_to_input[attr]);
         if (!array)
            continue;
         start = array->Ptr;
         low_addr = MIN2(low_addr, start);
      }
   }
   else {
      /* not sure we'll ever have zero inputs, but play it safe */
      bufobj = NULL;
      stride = 0;
      low_addr = 0;
   }

   /* are all the arrays in a VBO (as opposed to user space)? */
   usingVBO = _mesa_is_bufferobj(bufobj);

   for (attr = 0; attr < num_inputs;) {
      const struct gl_vertex_array *array;
      unsigned src_offset;
      unsigned src_format;

      array = get_client_array(arrays, vp->index_to_input[attr]);
      assert(array);

      src_offset = (unsigned) (array->Ptr - low_addr);
      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      src_format = st_pipe_vertex_format(array->Type,
                                         array->Size,
                                         array->Format,
                                         array->Normalized,
                                         array->Integer);

      init_velement_lowered(vp, velements, src_offset, src_format,
                            array->InstanceDivisor, 0,
                            array->Size, array->Doubles, &attr);
   }

   /*
    * Fill in the vbuffer info and set up user-space attrib info, if needed.
    */
   if (num_inputs == 0) {
      /* just defensive coding here */
      vbuffer.buffer.resource = NULL;
      vbuffer.is_user_buffer = false;
      vbuffer.buffer_offset = 0;
      vbuffer.stride = 0;
   }
   else if (usingVBO) {
      /* all interleaved arrays in a VBO */
      struct st_buffer_object *stobj = st_buffer_object(bufobj);

      if (!stobj || !stobj->buffer) {
         st->vertex_array_out_of_memory = true;
         return; /* out-of-memory error probably */
      }

      vbuffer.buffer.resource = stobj->buffer;
      vbuffer.is_user_buffer = false;
      vbuffer.buffer_offset = pointer_to_offset(low_addr);
      vbuffer.stride = stride;
   }
   else {
      /* all interleaved arrays in user memory */
      vbuffer.buffer.user = low_addr;
      vbuffer.is_user_buffer = !!low_addr; /* if NULL, then unbind */
      vbuffer.buffer_offset = 0;
      vbuffer.stride = stride;

      if (low_addr)
         st->draw_needs_minmax_index = true;
   }

   set_vertex_attribs(st, &vbuffer, num_inputs ? 1 : 0,
                      velements, num_inputs);
}

/**
 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
 * vertex attribute.  As with the interleaved path, the resulting state is
 * bound directly via set_vertex_attribs() rather than returned.
 */
static void
setup_non_interleaved_attribs(struct st_context *st,
                              const struct st_vertex_program *vp,
                              const struct gl_vertex_array **arrays,
                              unsigned num_inputs)
{
   struct gl_context *ctx = st->ctx;
   struct pipe_vertex_buffer vbuffer[PIPE_MAX_ATTRIBS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS] = {{0}};
   unsigned num_vbuffers = 0;
   unsigned unref_buffers = 0;
   GLuint attr;

   for (attr = 0; attr < num_inputs;) {
      const unsigned mesaAttr = vp->index_to_input[attr];
      const struct gl_vertex_array *array;
      struct gl_buffer_object *bufobj;
      GLsizei stride;
      unsigned src_format;
      unsigned bufidx;

      array = get_client_array(arrays, mesaAttr);
      assert(array);

      bufidx = num_vbuffers++;

      stride = array->StrideB;
      bufobj = array->BufferObj;
      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      if (_mesa_is_bufferobj(bufobj)) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_vertex_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         if (!stobj || !stobj->buffer) {
            st->vertex_array_out_of_memory = true;
            return; /* out-of-memory error probably */
         }

         vbuffer[bufidx].buffer.resource = stobj->buffer;
         vbuffer[bufidx].is_user_buffer = false;
         vbuffer[bufidx].buffer_offset = pointer_to_offset(array->Ptr);
      }
      else {
         if (stride == 0) {
            unsigned size = array->_ElementSize;
            /* This is optimal for GPU cache line usage if the upload size
             * is <= cache line size.
             */
            unsigned alignment = util_next_power_of_two(size);
            void *ptr = array->Ptr ? (void*)array->Ptr :
                                     (void*)ctx->Current.Attrib[mesaAttr];

            vbuffer[bufidx].is_user_buffer = false;
            vbuffer[bufidx].buffer.resource = NULL;

            /* Use const_uploader for zero-stride vertex attributes, because
             * it may use a better memory placement than stream_uploader.
             * The reason is that zero-stride attributes can be fetched many
             * times (thousands of times), so a better placement is going to
             * perform better.
             *
             * Upload the maximum possible size, which is 4x GLdouble = 32.
             */
            u_upload_data(st->can_bind_const_buffer_as_vertex ?
                             st->pipe->const_uploader :
                             st->pipe->stream_uploader,
                          0, size, alignment, ptr,
                          &vbuffer[bufidx].buffer_offset,
                          &vbuffer[bufidx].buffer.resource);
            unref_buffers |= 1u << bufidx;
         } else {
            assert(array->Ptr);
            vbuffer[bufidx].buffer.user = array->Ptr;
            vbuffer[bufidx].is_user_buffer = true;
            vbuffer[bufidx].buffer_offset = 0;

            if (!array->InstanceDivisor)
               st->draw_needs_minmax_index = true;
         }
      }

      /* common-case setup */
      vbuffer[bufidx].stride = stride; /* in bytes */

      src_format = st_pipe_vertex_format(array->Type,
                                         array->Size,
                                         array->Format,
                                         array->Normalized,
                                         array->Integer);

      init_velement_lowered(vp, velements, 0, src_format,
                            array->InstanceDivisor, bufidx,
                            array->Size, array->Doubles, &attr);
   }

   if (!ctx->Const.AllowMappedBuffersDuringExecution) {
      u_upload_unmap(st->pipe->stream_uploader);
   }

   set_vertex_attribs(st, vbuffer, num_vbuffers, velements, num_inputs);

   /* Unreference uploaded zero-stride vertex buffers. */
   while (unref_buffers) {
      unsigned i = u_bit_scan(&unref_buffers);
      pipe_resource_reference(&vbuffer[i].buffer.resource, NULL);
   }
}

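/*
 * Update the vertex array state: translate the current gl_vertex_arrays into
 * Gallium vertex buffers/elements and bind them, choosing the interleaved or
 * non-interleaved path.  The vertex program must have been validated first,
 * since its input mapping (index_to_input, num_inputs) drives the
 * translation; see the note in the body below.
 */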
void st_update_array(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;
   const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
   const struct st_vertex_program *vp;
   unsigned num_inputs;

   st->vertex_array_out_of_memory = FALSE;
   st->draw_needs_minmax_index = false;

   /* No drawing has been done yet, so do nothing. */
   if (!arrays)
      return;

   /* vertex program validation must be done before this */
   vp = st->vp;
   num_inputs = st->vp_variant->num_inputs;

   if (is_interleaved_arrays(vp, arrays, num_inputs))
      setup_interleaved_attribs(st, vp, arrays, num_inputs);
   else
      setup_non_interleaved_attribs(st, vp, arrays, num_inputs);
}