/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * This module uploads user buffers and translates the vertex buffers which
 * contain incompatible vertices (i.e. not supported by the driver/hardware)
 * into compatible ones, based on the Gallium CAPs.
 *
 * It does not upload index buffers.
 *
 * The module heavily uses bitmasks to represent per-buffer and
 * per-vertex-element flags to avoid looping over the list of buffers just
 * to see if there's a non-zero stride, or user buffer, or unsupported format,
 * etc.
 *
 * There are 3 categories of vertex elements, which are processed separately:
 * - per-vertex attribs (stride != 0, instance_divisor == 0)
 * - instanced attribs (stride != 0, instance_divisor > 0)
 * - constant attribs (stride == 0)
 *
 * All needed uploads and translations are performed every draw command, but
 * only the subset of vertices needed for that draw command is uploaded or
 * translated. (the module never translates whole buffers)
 *
 *
 * The module consists of two main parts:
 *
 *
 * 1) Translate (u_vbuf_translate_begin/end)
 *
 * This is pretty much a vertex fetch fallback. It translates vertices from
 * one vertex buffer to another in an unused vertex buffer slot. It does
 * whatever is needed to make the vertices readable by the hardware (changes
 * vertex formats and aligns offsets and strides). The translate module is
 * used here.
 *
 * Each of the 3 categories is translated to a separate buffer.
 * Only the [min_index, max_index] range is translated. For instanced attribs,
 * the range is [start_instance, start_instance+instance_count]. For constant
 * attribs, the range is [0, 1].
 *
 *
 * 2) User buffer uploading (u_vbuf_upload_buffers)
 *
 * Only the [min_index, max_index] range is uploaded (just like Translate)
 * with a single memcpy.
 *
 * This method works best for non-indexed draw operations, and for indexed
 * draw operations where the [min_index, max_index] range is not much larger
 * than the vertex count.
 *
 * If the range is too big (e.g. one triangle with indices {0, 1, 10000}),
 * the per-vertex attribs are uploaded via the translate module, all packed
 * into one vertex buffer, and the indexed draw call is turned into
 * a non-indexed one in the process. This adds additional complexity
 * to the translate part, but it prevents bad apps from bringing your frame
 * rate down.
 *
 *
 * If there is nothing to do, it forwards every command to the driver.
 * The module also has its own CSO cache of vertex element states.
 */
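
/* A minimal usage sketch for driver authors, assuming a hypothetical driver
 * context "ctx" that stores a u_vbuf pointer; only the u_vbuf_* calls are
 * this module's actual API:
 *
 *    struct u_vbuf_caps caps;
 *    u_vbuf_get_caps(screen, &caps, needs64b);
 *    if (caps.fallback_always || caps.fallback_only_for_user_vbuffers)
 *       ctx->vbuf = u_vbuf_create(pipe, &caps);
 *
 *    // In the draw path (simplified; a driver with
 *    // fallback_only_for_user_vbuffers would take this path only for
 *    // draws that source user vertex buffers):
 *    u_vbuf_set_vertex_elements(ctx->vbuf, velems);
 *    u_vbuf_set_vertex_buffers(ctx->vbuf, 0, count, bufs);
 *    u_vbuf_draw_vbo(ctx->vbuf, info);  // uploads/translates as needed,
 *                                       // then calls pipe->draw_vbo
 */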

#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_screen.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* Which buffers are used by the vertex element state. */
   uint32_t used_vb_mask;
   /* A bit is set if the corresponding element is incompatible for either
    * of two reasons:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
   /* Which buffers are referenced by at least one incompatible vertex
    * element. */
   uint32_t incompatible_vb_mask_any;
   /* Which buffers are referenced by incompatible vertex elements only. */
   uint32_t incompatible_vb_mask_all;
   /* Which buffers are referenced by at least one compatible vertex
    * element. */
   uint32_t compatible_vb_mask_any;
   /* Which buffers are referenced by compatible vertex elements only. */
   uint32_t compatible_vb_mask_all;

   /* Which buffers are referenced by at least one non-instanced vertex
    * element. */
   uint32_t noninstance_vb_mask_any;

   /* Which buffers are used by multiple vertex attribs. */
   uint32_t interleaved_vb_mask;

   void *driver_cso;
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf {
   struct u_vbuf_caps caps;
   bool has_signed_vb_offset;

   struct pipe_context *pipe;
   struct translate_cache *translate_cache;
   struct cso_cache *cso_cache;

   /* This is what was set in set_vertex_buffers.
    * May contain user buffers. */
   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t enabled_vb_mask;

   /* Saved vertex buffer. */
   struct pipe_vertex_buffer vertex_buffer0_saved;

   /* Vertex buffers for the driver.
    * There are usually no user buffers. */
   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
                                   call of set_vertex_buffers */

   /* Vertex elements. */
   struct u_vbuf_elements *ve, *ve_saved;

   /* Vertex elements used for the translate fallback. */
   struct cso_velems_state fallback_velems;
   /* True if fallback_velems is currently bound, and therefore used for
    * rendering too. */
   boolean using_translate;
   /* The vertex buffer slot indices where translated vertices are
    * stored. */
   unsigned fallback_vbs[VB_NUM];
   unsigned fallback_vbs_mask;

   /* Which buffer is a user buffer. */
   uint32_t user_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer is incompatible (unaligned). */
   uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer has a non-zero stride. */
   uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffers are allowed (supported by hardware). */
   uint32_t allowed_vb_mask;
};

static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs);
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);

static const struct {
   enum pipe_format from, to;
} vbuf_format_fallbacks[] = {
   { PIPE_FORMAT_R32_FIXED,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_FIXED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_FIXED,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_FIXED,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R64_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R64G64_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R64G64B64_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R64G64B64A64_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_UNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_UNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_UNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_UNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_USCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_USCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_USCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_USCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SSCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SSCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SSCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SSCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
};
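
/* For example: if the screen rejects PIPE_FORMAT_R16G16B16_FLOAT for
 * PIPE_BIND_VERTEX_BUFFER, u_vbuf_get_caps() below records the mapping
 * R16G16B16_FLOAT -> R32G32B32_FLOAT in caps->format_translation, and the
 * translate fallback expands such attribs at draw time. */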

void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps,
                     bool needs64b)
{
   unsigned i;

   memset(caps, 0, sizeof(*caps));

   /* I'd rather have a bitfield of which formats are supported and a static
    * table of the translations indexed by format, but since we don't have C99
    * we can't easily make a sparsely-populated table indexed by format. So,
    * we construct the sparse table here.
    */
   for (i = 0; i < PIPE_FORMAT_COUNT; i++)
      caps->format_translation[i] = i;

   for (i = 0; i < ARRAY_SIZE(vbuf_format_fallbacks); i++) {
      enum pipe_format format = vbuf_format_fallbacks[i].from;
      unsigned comp_bits = util_format_get_component_bits(format, 0, 0);

      if ((comp_bits > 32) && !needs64b)
         continue;

      if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0, 0,
                                       PIPE_BIND_VERTEX_BUFFER)) {
         caps->format_translation[format] = vbuf_format_fallbacks[i].to;
         caps->fallback_always = true;
      }
   }

   caps->buffer_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->buffer_stride_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY);
   caps->velem_src_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->user_vertex_buffers =
      screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);
   caps->max_vertex_buffers =
      screen->get_param(screen, PIPE_CAP_MAX_VERTEX_BUFFERS);

   /* OpenGL 2.0 requires a minimum of 16 vertex buffers */
   if (caps->max_vertex_buffers < 16)
      caps->fallback_always = true;

   if (!caps->buffer_offset_unaligned ||
       !caps->buffer_stride_unaligned ||
       !caps->velem_src_offset_unaligned)
      caps->fallback_always = true;

   if (!caps->fallback_always && !caps->user_vertex_buffers)
      caps->fallback_only_for_user_vbuffers = true;
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe, struct u_vbuf_caps *caps)
{
   struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);

   mgr->caps = *caps;
   mgr->pipe = pipe;
   mgr->cso_cache = cso_cache_create();
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));
   mgr->allowed_vb_mask = u_bit_consecutive(0, mgr->caps.max_vertex_buffers);

   mgr->has_signed_vb_offset =
      pipe->screen->get_param(pipe->screen,
                              PIPE_CAP_SIGNED_VERTEX_BUFFER_OFFSET);

   return mgr;
}

/* u_vbuf uses its own caching for vertex elements, because it needs to keep
 * its own preprocessed state per vertex element CSO. */
static struct u_vbuf_elements *
u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr,
                                    const struct cso_velems_state *velems)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   struct u_vbuf_elements *ve;

   /* need to include the count into the stored state data too. */
   key_size = sizeof(struct pipe_vertex_element) * velems->count +
              sizeof(unsigned);
   hash_key = cso_construct_key((void*)velems, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)velems, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, velems, key_size);
      cso->data = u_vbuf_create_vertex_elements(mgr, velems->count,
                                                velems->velems);
      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
      cso->context = (void*)mgr;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
      ve = cso->data;
   } else {
      ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   assert(ve);

   if (ve != mgr->ve)
      pipe->bind_vertex_elements_state(pipe, ve->driver_cso);

   return ve;
}

void u_vbuf_set_vertex_elements(struct u_vbuf *mgr,
                                const struct cso_velems_state *velems)
{
   mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, velems);
}

void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr)
{
   mgr->ve = NULL;
}

void u_vbuf_destroy(struct u_vbuf *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;
   unsigned i;
   const unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
                                                    PIPE_SHADER_CAP_MAX_INPUTS);

   mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);

   pipe_vertex_buffer_unreference(&mgr->vertex_buffer0_saved);

   translate_cache_destroy(mgr->translate_cache);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}

static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                         const struct pipe_draw_info *info,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int min_index, boolean unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned out_offset, mask;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   mask = vb_mask;
   while (mask) {
      struct pipe_vertex_buffer *vb;
      unsigned offset;
      uint8_t *map;
      unsigned i = u_bit_scan(&mask);

      vb = &mgr->vertex_buffer[i];
      offset = vb->buffer_offset + vb->stride * start_vertex;

      if (vb->is_user_buffer) {
         map = (uint8_t*)vb->buffer.user + offset;
      } else {
         unsigned size = vb->stride ? num_vertices * vb->stride
                                    : sizeof(double)*4;

         if (!vb->buffer.resource)
            continue;

         if (offset + size > vb->buffer.resource->width0) {
            /* Don't try to map past end of buffer. This often happens when
             * we're translating an attribute that's at offset > 0 from the
             * start of the vertex. If we'd subtract attrib's offset from
             * the size, this probably wouldn't happen.
             */
            size = vb->buffer.resource->width0 - offset;

            /* Also adjust num_vertices. A common user error is to call
             * glDrawRangeElements() with an incorrect 'end' argument. The
             * 'end' value should be the max index value, but people often
             * accidentally add one to it. This adjustment avoids crashing
             * (by reading past the end of a hardware buffer mapping)
             * when people do that.
             */
            num_vertices = (size + vb->stride - 1) / vb->stride;
         }

         map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
                                     PIPE_MAP_READ, &vb_transfer[i]);
      }

      /* Subtract min_index so that indexing with the index buffer works. */
      if (unroll_indices) {
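         /* (e.g. with min_index = 100 and index_bias = 0, the mapping made
          * above starts at vertex 100; rebasing the pointer lets the raw
          * index value 100 read the first mapped vertex) */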
         map -= (ptrdiff_t)vb->stride * min_index;
      }

      tr->set_buffer(tr, i, map, vb->stride, info->max_index);
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_transfer *transfer = NULL;
      const unsigned offset = info->start * info->index_size;
      uint8_t *map;

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader, 0,
                     key->output_stride * info->count, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      if (info->has_user_indices) {
         map = (uint8_t*)info->index.user + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
                                     info->count * info->index_size,
                                     PIPE_MAP_READ, &transfer);
      }

      switch (info->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, info->count, 0, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader,
                     mgr->has_signed_vb_offset ?
                        0 : key->output_stride * start_vertex,
                     key->output_stride * num_vertices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, 0, out_map);
   }

   /* Unmap all buffers. */
   mask = vb_mask;
   while (mask) {
      unsigned i = u_bit_scan(&mask);

      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[out_vb]);
   mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
   mgr->real_vertex_buffer[out_vb].is_user_buffer = false;

   return PIPE_OK;
}

static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned type;
   unsigned fallback_vbs[VB_NUM];
   /* Set a bit for each buffer which is incompatible or not enabled. */
   uint32_t unused_vb_mask =
      mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
      ~mgr->enabled_vb_mask;
   uint32_t unused_vb_mask_orig;
   boolean insufficient_buffers = false;

   /* No vertex buffers available at all */
   if (!unused_vb_mask)
      return FALSE;

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));
   mgr->fallback_vbs_mask = 0;

   /* Find free slots for each type if needed. */
   unused_vb_mask_orig = unused_vb_mask;
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         uint32_t index;

         if (!unused_vb_mask) {
            insufficient_buffers = true;
            break;
         }

         index = ffs(unused_vb_mask) - 1;
         fallback_vbs[type] = index;
         mgr->fallback_vbs_mask |= 1 << index;
         unused_vb_mask &= ~(1 << index);
         /*printf("found slot=%i for type=%i\n", index, type);*/
      }
   }

   if (insufficient_buffers) {
      /* Not enough free slots for all attrib types; they will have to
       * share one buffer. */
      uint32_t index = ffs(unused_vb_mask_orig) - 1;
      /* When sharing one vertex buffer use per-vertex frequency for everything. */
      fallback_vbs[VB_VERTEX] = index;
      mgr->fallback_vbs_mask = 1 << index;
      mask[VB_VERTEX] = mask[VB_VERTEX] | mask[VB_CONST] | mask[VB_INSTANCE];
      mask[VB_CONST] = 0;
      mask[VB_INSTANCE] = 0;
   }

   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type];
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}

static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
                       const struct pipe_draw_info *info,
                       int start_vertex, unsigned num_vertices,
                       int min_index, boolean unroll_indices)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;
   const unsigned incompatible_vb_mask = mgr->incompatible_vb_mask &
                                         mgr->ve->used_vb_mask;

   const int start[VB_NUM] = {
      start_vertex,           /* VERTEX */
      info->start_instance,   /* INSTANCE */
      0                       /* CONST */
   };

   const unsigned num[VB_NUM] = {
      num_vertices,           /* VERTEX */
      info->instance_count,   /* INSTANCE */
      1                       /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->vertex_buffer[vb_index].stride) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         if (!unroll_indices &&
             !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
      bit = 1 << vb_index;

      if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
          !(incompatible_vb_mask & (1 << vb_index)) &&
          (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
         continue;
      }

      /* Determine which type this element will be translated as:
       * per-vertex, per-instance, or constant. */
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      if (mgr->ve->ve[i].src_format != output_format)
         assert(translate_is_output_format_supported(output_format));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         enum pipe_error err;
         err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type],
                                        mgr->fallback_vbs[type],
                                        start[type], num[type], min_index,
                                        unroll_indices && type == VB_VERTEX);
         if (err != PIPE_OK)
            return FALSE;

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems.velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems.velems[i].src_format = te->output_format;
            mgr->fallback_velems.velems[i].src_offset = te->output_offset;
            mgr->fallback_velems.velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0u);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0u);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems.velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_velems.count = mgr->ve->count;

   u_vbuf_set_vertex_elements_internal(mgr, &mgr->fallback_velems);
   mgr->using_translate = TRUE;
   return TRUE;
}

static void u_vbuf_translate_end(struct u_vbuf *mgr)
{
   unsigned i;

   /* Restore vertex elements. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
   mgr->using_translate = FALSE;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0u) {
         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL);
         mgr->fallback_vbs[i] = ~0;
      }
   }
   /* This will cause the buffer to be unbound in the driver later. */
   mgr->dirty_real_vb_mask |= mgr->fallback_vbs_mask;
   mgr->fallback_vbs_mask = 0;
}

static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned i;
   struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS];
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);
   uint32_t used_buffers = 0;

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;
      unsigned vb_index_bit = 1 << ve->ve[i].vertex_buffer_index;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      if (used_buffers & vb_index_bit)
         ve->interleaved_vb_mask |= vb_index_bit;

      used_buffers |= vb_index_bit;

      if (!ve->ve[i].instance_divisor) {
         ve->noninstance_vb_mask_any |= vb_index_bit;
      }

      format = mgr->caps.format_translation[format];

      driver_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      if (ve->ve[i].src_format != format ||
          (!mgr->caps.velem_src_offset_unaligned &&
           ve->ve[i].src_offset % 4 != 0)) {
         ve->incompatible_elem_mask |= 1 << i;
         ve->incompatible_vb_mask_any |= vb_index_bit;
      } else {
         ve->compatible_vb_mask_any |= vb_index_bit;
      }
   }
   if (used_buffers & ~mgr->allowed_vb_mask) {
      /* More vertex buffers are used than the hardware supports. In
       * principle, we only need to make sure that fewer vertex buffers are
       * used, and mark some of the latter vertex buffers as incompatible.
       * For now, mark all vertex buffers as incompatible.
       */
      ve->incompatible_vb_mask_any = used_buffers;
      ve->compatible_vb_mask_any = 0;
      ve->incompatible_elem_mask = u_bit_consecutive(0, count);
   }

   ve->used_vb_mask = used_buffers;
   ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers;
   ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers;

   /* Align the formats and offsets to the size of DWORD if needed. */
   if (!mgr->caps.velem_src_offset_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
         driver_attribs[i].src_offset = align(ve->ve[i].src_offset, 4);
      }
   }

   /* Only create driver CSO if no incompatible elements */
   if (!ve->incompatible_elem_mask) {
      ve->driver_cso =
         pipe->create_vertex_elements_state(pipe, count, driver_attribs);
   }

   return ve;
}

static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
{
   struct pipe_context *pipe = mgr->pipe;
   struct u_vbuf_elements *ve = cso;

   if (ve->driver_cso)
      pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
                               unsigned start_slot, unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   unsigned i;
   /* which buffers are enabled */
   uint32_t enabled_vb_mask = 0;
   /* which buffers are in user memory */
   uint32_t user_vb_mask = 0;
   /* which buffers are incompatible with the driver */
   uint32_t incompatible_vb_mask = 0;
   /* which buffers have a non-zero stride */
   uint32_t nonzero_stride_vb_mask = 0;
   const uint32_t mask = ~(((1ull << count) - 1) << start_slot);
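   /* e.g. start_slot = 1, count = 2 -> mask has bits 1-2 cleared and all
    * other bits set, i.e. it keeps the slots we are not rewriting. */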

   /* Zero out the bits we are going to rewrite completely. */
   mgr->user_vb_mask &= mask;
   mgr->incompatible_vb_mask &= mask;
   mgr->nonzero_stride_vb_mask &= mask;
   mgr->enabled_vb_mask &= mask;

   if (!bufs) {
      struct pipe_context *pipe = mgr->pipe;
      /* Unbind. */
      mgr->dirty_real_vb_mask &= mask;

      for (i = 0; i < count; i++) {
         unsigned dst_index = start_slot + i;

         pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
         pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
      }

      pipe->set_vertex_buffers(pipe, start_slot, count, NULL);
      return;
   }

   for (i = 0; i < count; i++) {
      unsigned dst_index = start_slot + i;
      const struct pipe_vertex_buffer *vb = &bufs[i];
      struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
      struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];

      if (!vb->buffer.resource) {
         pipe_vertex_buffer_unreference(orig_vb);
         pipe_vertex_buffer_unreference(real_vb);
         continue;
      }

      pipe_vertex_buffer_reference(orig_vb, vb);

      if (vb->stride) {
         nonzero_stride_vb_mask |= 1 << dst_index;
      }
      enabled_vb_mask |= 1 << dst_index;

      if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
          (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
         incompatible_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
         user_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      pipe_vertex_buffer_reference(real_vb, vb);
   }

   mgr->user_vb_mask |= user_vb_mask;
   mgr->incompatible_vb_mask |= incompatible_vb_mask;
   mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask;
   mgr->enabled_vb_mask |= enabled_vb_mask;

   /* All changed buffers are marked as dirty, even the NULL ones,
    * which will cause the NULL buffers to be unbound in the driver later. */
   mgr->dirty_real_vb_mask |= ~mask;
}

static ALWAYS_INLINE bool
get_upload_offset_size(struct u_vbuf *mgr,
                       const struct pipe_vertex_buffer *vb,
                       struct u_vbuf_elements *ve,
                       const struct pipe_vertex_element *velem,
                       unsigned vb_index, unsigned velem_index,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances,
                       unsigned *offset, unsigned *size)
{
   /* Skip the buffers generated by translate. */
   if ((1 << vb_index) & mgr->fallback_vbs_mask || !vb->is_user_buffer)
      return false;

   unsigned instance_div = velem->instance_divisor;
   *offset = vb->buffer_offset + velem->src_offset;

   if (!vb->stride) {
      /* Constant attrib. */
      *size = ve->src_format_size[velem_index];
   } else if (instance_div) {
      /* Per-instance attrib. */

      /* Figure out how many instances we'll render given instance_div. We
       * can't use the typical div_round_up() pattern because the CTS uses
       * instance_div = ~0 for a test, which overflows div_round_up()'s
       * addition.
       */
      unsigned count = num_instances / instance_div;
      if (count * instance_div != num_instances)
         count++;
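      /* e.g. num_instances = 5, instance_div = 2 -> count = 3;
       * num_instances = 5, instance_div = ~0u -> count = 1, with no
       * overflowing addition involved. */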

      *offset += vb->stride * start_instance;
      *size = vb->stride * (count - 1) + ve->src_format_size[velem_index];
   } else {
      /* Per-vertex attrib. */
      *offset += vb->stride * start_vertex;
      *size = vb->stride * (num_vertices - 1) + ve->src_format_size[velem_index];
   }
   return true;
}


static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   struct u_vbuf_elements *ve = mgr->ve;
   unsigned nr_velems = ve->count;
   const struct pipe_vertex_element *velems =
      mgr->using_translate ? mgr->fallback_velems.velems : ve->ve;

   /* Faster path when no vertex attribs are interleaved. */
   if ((ve->interleaved_vb_mask & mgr->user_vb_mask) == 0) {
      for (i = 0; i < nr_velems; i++) {
         const struct pipe_vertex_element *velem = &velems[i];
         unsigned index = velem->vertex_buffer_index;
         struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
         unsigned offset, size;

         if (!get_upload_offset_size(mgr, vb, ve, velem, index, i, start_vertex,
                                     num_vertices, start_instance, num_instances,
                                     &offset, &size))
            continue;

         struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[index];
         const uint8_t *ptr = mgr->vertex_buffer[index].buffer.user;

         u_upload_data(mgr->pipe->stream_uploader,
                       mgr->has_signed_vb_offset ? 0 : offset,
                       size, 4, ptr + offset, &real_vb->buffer_offset,
                       &real_vb->buffer.resource);
         if (!real_vb->buffer.resource)
            return PIPE_ERROR_OUT_OF_MEMORY;

         real_vb->buffer_offset -= offset;
      }
      return PIPE_OK;
   }

   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS];
   uint32_t buffer_mask = 0;

   /* Slower path supporting interleaved vertex attribs using 2 loops. */
   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      const struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
      unsigned first, size, index_bit;

      if (!get_upload_offset_size(mgr, vb, ve, velem, index, i, start_vertex,
                                  num_vertices, start_instance, num_instances,
                                  &first, &size))
         continue;

      index_bit = 1 << index;

      /* Update offsets. */
      if (!(buffer_mask & index_bit)) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }

      buffer_mask |= index_bit;
   }

   /* Upload buffers. */
   while (buffer_mask) {
      unsigned start, end;
      struct pipe_vertex_buffer *real_vb;
      const uint8_t *ptr;

      i = u_bit_scan(&buffer_mask);

      start = start_offset[i];
      end = end_offset[i];
      assert(start < end);

      real_vb = &mgr->real_vertex_buffer[i];
      ptr = mgr->vertex_buffer[i].buffer.user;

      u_upload_data(mgr->pipe->stream_uploader,
                    mgr->has_signed_vb_offset ? 0 : start,
                    end - start, 4,
                    ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource);
      if (!real_vb->buffer.resource)
         return PIPE_ERROR_OUT_OF_MEMORY;

      real_vb->buffer_offset -= start;
   }

   return PIPE_OK;
}

static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr)
{
   /* See if there are any per-vertex attribs which will be uploaded or
    * translated. Use bitmasks to get the info instead of looping over vertex
    * elements. */
   return (mgr->ve->used_vb_mask &
           ((mgr->user_vb_mask |
             mgr->incompatible_vb_mask |
             mgr->ve->incompatible_vb_mask_any) &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}

static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr)
{
   /* Return true if there are hw buffers which don't need to be translated.
    *
    * We could query whether each buffer is busy, but that would
    * be way more costly than this. */
   return (mgr->ve->used_vb_mask &
           (~mgr->user_vb_mask &
            ~mgr->incompatible_vb_mask &
            mgr->ve->compatible_vb_mask_all &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}

static void
u_vbuf_get_minmax_index_mapped(const struct pipe_draw_info *info,
                               const void *indices, unsigned *out_min_index,
                               unsigned *out_max_index)
{
   if (!info->count) {
      *out_min_index = 0;
      *out_max_index = 0;
      return;
   }

   switch (info->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max = 0;
      unsigned min = ~0u;
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (ui_indices[i] != info->restart_index) {
               if (ui_indices[i] > max) max = ui_indices[i];
               if (ui_indices[i] < min) min = ui_indices[i];
            }
         }
      }
      else {
         for (unsigned i = 0; i < info->count; i++) {
            if (ui_indices[i] > max) max = ui_indices[i];
            if (ui_indices[i] < min) min = ui_indices[i];
         }
      }
      *out_min_index = min;
      *out_max_index = max;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned short max = 0;
      unsigned short min = ~((unsigned short)0);
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (us_indices[i] != info->restart_index) {
               if (us_indices[i] > max) max = us_indices[i];
               if (us_indices[i] < min) min = us_indices[i];
            }
         }
      }
      else {
         for (unsigned i = 0; i < info->count; i++) {
            if (us_indices[i] > max) max = us_indices[i];
            if (us_indices[i] < min) min = us_indices[i];
         }
      }
      *out_min_index = min;
      *out_max_index = max;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned char max = 0;
      unsigned char min = ~((unsigned char)0);
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (ub_indices[i] != info->restart_index) {
               if (ub_indices[i] > max) max = ub_indices[i];
               if (ub_indices[i] < min) min = ub_indices[i];
            }
         }
      }
      else {
         for (unsigned i = 0; i < info->count; i++) {
            if (ub_indices[i] > max) max = ub_indices[i];
            if (ub_indices[i] < min) min = ub_indices[i];
         }
      }
      *out_min_index = min;
      *out_max_index = max;
      break;
   }
   default:
      unreachable("bad index size");
   }
}

void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                             const struct pipe_draw_info *info,
                             unsigned *out_min_index, unsigned *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;

   if (info->has_user_indices) {
      indices = (uint8_t*)info->index.user +
                info->start * info->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, info->index.resource,
                                      info->start * info->index_size,
                                      info->count * info->index_size,
                                      PIPE_MAP_READ, &transfer);
   }

   u_vbuf_get_minmax_index_mapped(info, indices, out_min_index, out_max_index);

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}

static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned start_slot, count;

   start_slot = ffs(mgr->dirty_real_vb_mask) - 1;
   count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot);

   pipe->set_vertex_buffers(pipe, start_slot, count,
                            mgr->real_vertex_buffer + start_slot);
   mgr->dirty_real_vb_mask = 0;
}

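/* Layout of one record in the indirect buffer for an indexed draw, in
 * dwords, matching how the loop below reads indirect_data:
 *   [0] count, [1] instance_count, [2] start, [3] index_bias,
 *   [4] start_instance */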
static void
u_vbuf_split_indexed_multidraw(struct u_vbuf *mgr, struct pipe_draw_info *info,
                               unsigned *indirect_data, unsigned stride,
                               unsigned draw_count)
{
   assert(info->index_size);
   info->indirect = NULL;

   for (unsigned i = 0; i < draw_count; i++) {
      unsigned offset = i * stride / 4;

      info->count = indirect_data[offset + 0];
      info->instance_count = indirect_data[offset + 1];

      if (!info->count || !info->instance_count)
         continue;

      info->start = indirect_data[offset + 2];
      info->index_bias = indirect_data[offset + 3];
      info->start_instance = indirect_data[offset + 4];

      u_vbuf_draw_vbo(mgr, info);
   }
}

void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
{
   struct pipe_context *pipe = mgr->pipe;
   int start_vertex;
   unsigned min_index;
   unsigned num_vertices;
   boolean unroll_indices = FALSE;
   const uint32_t used_vb_mask = mgr->ve->used_vb_mask;
   uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask;
   const uint32_t incompatible_vb_mask =
      mgr->incompatible_vb_mask & used_vb_mask;
   struct pipe_draw_info new_info;

   /* Normal draw. No fallback and no user buffers. */
   if (!incompatible_vb_mask &&
       !mgr->ve->incompatible_elem_mask &&
       !user_vb_mask) {

      /* Set vertex buffers if needed. */
      if (mgr->dirty_real_vb_mask & used_vb_mask) {
         u_vbuf_set_driver_vertex_buffers(mgr);
      }

      pipe->draw_vbo(pipe, info);
      return;
   }

   new_info = *info;

   /* Handle indirect (multi)draws. */
   if (new_info.indirect) {
      const struct pipe_draw_indirect_info *indirect = new_info.indirect;
      unsigned draw_count = 0;

      /* Get the number of draws. */
      if (indirect->indirect_draw_count) {
         pipe_buffer_read(pipe, indirect->indirect_draw_count,
                          indirect->indirect_draw_count_offset,
                          4, &draw_count);
      } else {
         draw_count = indirect->draw_count;
      }

      if (!draw_count)
         return;

      unsigned data_size = (draw_count - 1) * indirect->stride +
                           (new_info.index_size ? 20 : 16);
      unsigned *data = malloc(data_size);
      if (!data)
         return; /* report an error? */

      /* Read the used buffer range only once, because the read can be
       * uncached.
       */
      pipe_buffer_read(pipe, indirect->buffer, indirect->offset, data_size,
                       data);

      if (info->index_size) {
         /* Indexed multidraw. */
         unsigned index_bias0 = data[3];
         bool index_bias_same = true;

         /* If we invoke the translate path, we have to split the multidraw. */
         if (incompatible_vb_mask ||
             mgr->ve->incompatible_elem_mask) {
            u_vbuf_split_indexed_multidraw(mgr, &new_info, data,
                                           indirect->stride, draw_count);
            free(data);
            return;
         }

         /* See if index_bias is the same for all draws. */
         for (unsigned i = 1; i < draw_count; i++) {
            if (data[i * indirect->stride / 4 + 3] != index_bias0) {
               index_bias_same = false;
               break;
            }
         }

         /* Split the multidraw if index_bias is different. */
         if (!index_bias_same) {
            u_vbuf_split_indexed_multidraw(mgr, &new_info, data,
                                           indirect->stride, draw_count);
            free(data);
            return;
         }

         /* If we don't need to use the translate path and index_bias is
          * the same, we can process the multidraw with the time complexity
          * equal to 1 draw call (except for the index range computation).
          * We only need to compute the index range covering all draw calls
          * of the multidraw.
          *
          * The driver will not look at these values because indirect != NULL.
          * These values determine the user buffer bounds to upload.
          */
         new_info.index_bias = index_bias0;
         new_info.min_index = ~0u;
         new_info.max_index = 0;
         new_info.start_instance = ~0u;
         unsigned end_instance = 0;

         struct pipe_transfer *transfer = NULL;
         const uint8_t *indices;

         if (info->has_user_indices) {
            indices = (uint8_t*)info->index.user;
         } else {
            indices = (uint8_t*)pipe_buffer_map(pipe, info->index.resource,
                                                PIPE_MAP_READ, &transfer);
         }

         for (unsigned i = 0; i < draw_count; i++) {
            unsigned offset = i * indirect->stride / 4;
            unsigned start = data[offset + 2];
            unsigned count = data[offset + 0];
            unsigned start_instance = data[offset + 4];
            unsigned instance_count = data[offset + 1];

            if (!count || !instance_count)
               continue;

            /* Update the ranges of instances. */
            new_info.start_instance = MIN2(new_info.start_instance,
                                           start_instance);
            end_instance = MAX2(end_instance, start_instance + instance_count);

            /* Update the index range. */
            unsigned min, max;
            new_info.count = count; /* only used by get_minmax_index */
            u_vbuf_get_minmax_index_mapped(&new_info,
                                           indices +
                                           new_info.index_size * start,
                                           &min, &max);

            new_info.min_index = MIN2(new_info.min_index, min);
            new_info.max_index = MAX2(new_info.max_index, max);
         }
         free(data);

         if (transfer)
            pipe_buffer_unmap(pipe, transfer);

         /* Set the final instance count. */
         new_info.instance_count = end_instance - new_info.start_instance;

         if (new_info.start_instance == ~0u || !new_info.instance_count)
            return;
      } else {
         /* Non-indexed multidraw.
          *
          * Keep the draw call indirect and compute minimums & maximums,
          * which will determine the user buffer bounds to upload, but
          * the driver will not look at these values because indirect != NULL.
          *
          * This efficiently processes the multidraw with the time complexity
          * equal to 1 draw call.
          */
         new_info.start = ~0u;
         new_info.start_instance = ~0u;
         unsigned end_vertex = 0;
         unsigned end_instance = 0;

         for (unsigned i = 0; i < draw_count; i++) {
            unsigned offset = i * indirect->stride / 4;
            unsigned start = data[offset + 2];
            unsigned count = data[offset + 0];
            unsigned start_instance = data[offset + 3];
            unsigned instance_count = data[offset + 1];

            new_info.start = MIN2(new_info.start, start);
            new_info.start_instance = MIN2(new_info.start_instance,
                                           start_instance);

            end_vertex = MAX2(end_vertex, start + count);
            end_instance = MAX2(end_instance, start_instance + instance_count);
         }
         free(data);

         /* Set the final counts. */
         new_info.count = end_vertex - new_info.start;
         new_info.instance_count = end_instance - new_info.start_instance;

         if (new_info.start == ~0u || !new_info.count || !new_info.instance_count)
            return;
      }
   }

   if (new_info.index_size) {
      /* See if anything needs to be done for per-vertex attribs. */
      if (u_vbuf_need_minmax_index(mgr)) {
         unsigned max_index;

         if (new_info.max_index != ~0u) {
            min_index = new_info.min_index;
            max_index = new_info.max_index;
         } else {
            u_vbuf_get_minmax_index(mgr->pipe, &new_info,
                                    &min_index, &max_index);
         }

         assert(min_index <= max_index);

         start_vertex = min_index + new_info.index_bias;
         num_vertices = max_index + 1 - min_index;

         /* Primitive restart doesn't work when unrolling indices.
          * We would have to break this drawing operation into several ones. */
         /* Use some heuristic to see if unrolling indices improves
          * performance. */
         if (!info->indirect &&
             !new_info.primitive_restart &&
             util_is_vbo_upload_ratio_too_large(new_info.count, num_vertices) &&
             !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
            unroll_indices = TRUE;
            user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
                              mgr->ve->noninstance_vb_mask_any);
         }
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = new_info.start;
      num_vertices = new_info.count;
      min_index = 0;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
       incompatible_vb_mask ||
       mgr->ve->incompatible_elem_mask) {
      if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices,
                                  min_index, unroll_indices)) {
         debug_warn_once("u_vbuf_translate_begin() failed");
         return;
      }

      if (unroll_indices) {
         new_info.index_size = 0;
         new_info.index_bias = 0;
         new_info.min_index = 0;
         new_info.max_index = new_info.count - 1;
         new_info.start = 0;
      }

      user_vb_mask &= ~(incompatible_vb_mask |
                        mgr->ve->incompatible_vb_mask_all);
   }

   /* Upload user buffers. */
   if (user_vb_mask) {
      if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                                new_info.start_instance,
                                new_info.instance_count) != PIPE_OK) {
         debug_warn_once("u_vbuf_upload_buffers() failed");
         return;
      }

      mgr->dirty_real_vb_mask |= user_vb_mask;
   }

   /*
   if (unroll_indices) {
      printf("unrolling indices: start_vertex = %i, num_vertices = %i\n",
             start_vertex, num_vertices);
      util_dump_draw_info(stdout, info);
      printf("\n");
   }

   unsigned i;
   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
      printf("\n");
   }
   */

   u_upload_unmap(pipe->stream_uploader);
   u_vbuf_set_driver_vertex_buffers(mgr);

   pipe->draw_vbo(pipe, &new_info);

   if (mgr->using_translate) {
      u_vbuf_translate_end(mgr);
   }
}

void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
{
   assert(!mgr->ve_saved);
   mgr->ve_saved = mgr->ve;
}

void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
{
   if (mgr->ve != mgr->ve_saved) {
      struct pipe_context *pipe = mgr->pipe;

      mgr->ve = mgr->ve_saved;
      pipe->bind_vertex_elements_state(pipe,
                                       mgr->ve ? mgr->ve->driver_cso : NULL);
   }
   mgr->ve_saved = NULL;
}

void u_vbuf_save_vertex_buffer0(struct u_vbuf *mgr)
{
   pipe_vertex_buffer_reference(&mgr->vertex_buffer0_saved,
                                &mgr->vertex_buffer[0]);
}

void u_vbuf_restore_vertex_buffer0(struct u_vbuf *mgr)
{
   u_vbuf_set_vertex_buffers(mgr, 0, 1, &mgr->vertex_buffer0_saved);
   pipe_vertex_buffer_unreference(&mgr->vertex_buffer0_saved);
}