/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/format/u_format.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_prim.h"
#include "translate/translate.h"

#include "nouveau_fence.h"
#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

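/* Emit a constant (stride == 0) vertex attribute as immediate VTX_ATTR_nF
 * state: unpack a single element from the buffer into floats and push it
 * inline.
 */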
static void
nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
                  struct pipe_vertex_element *ve, unsigned attr)
{
   const unsigned nc = util_format_get_nr_components(ve->src_format);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv04_resource *res = nv04_resource(vb->buffer.resource);
   const void *data;
   float v[4];

   data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset +
                                      ve->src_offset, NOUVEAU_BO_RD);

   util_format_unpack_rgba(ve->src_format, v, data, 1);

   switch (nc) {
   case 4:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      PUSH_DATAf(push, v[3]);
      break;
   case 3:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      break;
   case 2:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      break;
   case 1:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1);
      PUSH_DATAf(push, v[0]);
      break;
   default:
      assert(0);
      break;
   }
}

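/* Compute the byte range of vertex buffer 'vbi' referenced by the current
 * draw, i.e. the span covered by [vbo_min_index, vbo_max_index].
 */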
static inline void
nv30_vbuf_range(struct nv30_context *nv30, int vbi,
                uint32_t *base, uint32_t *size)
{
   assert(nv30->vbo_max_index != ~0);
   *base = nv30->vbo_min_index * nv30->vtxbuf[vbi].stride;
   *size = (nv30->vbo_max_index -
            nv30->vbo_min_index + 1) * nv30->vtxbuf[vbi].stride;
}

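/* Decide how each bound vertex buffer will reach the GPU: flag the inline
 * push path (vbo_fifo), upload the referenced range of a user-memory buffer,
 * or migrate an ordinary buffer into GART.
 */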
static void
nv30_prevalidate_vbufs(struct nv30_context *nv30)
{
   struct pipe_vertex_buffer *vb;
   struct nv04_resource *buf;
   int i;
   uint32_t base, size;

   nv30->vbo_fifo = nv30->vbo_user = 0;

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      vb = &nv30->vtxbuf[i];
      if (!vb->stride || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
         continue;
      buf = nv04_resource(vb->buffer.resource);

      /* NOTE: user buffers with temporary storage count as mapped by GPU */
      if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
         if (nv30->vbo_push_hint) {
            nv30->vbo_fifo = ~0;
            continue;
         } else {
            if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
               nv30->vbo_user |= 1 << i;
               assert(vb->stride > vb->buffer_offset);
               nv30_vbuf_range(nv30, i, &base, &size);
               nouveau_user_buffer_upload(&nv30->base, buf, base, size);
            } else {
               nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
            }
            nv30->base.vbo_dirty = true;
         }
      }
   }
}

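/* Upload the ranges of user vertex buffers referenced by the current vertex
 * elements and point the hardware VTXBUF slots at the temporary copies;
 * zero-stride elements are emitted as immediate attributes instead.
 */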
static void
nv30_update_user_vbufs(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   uint32_t base, offset, size;
   int i;
   uint32_t written = 0;

   for (i = 0; i < nv30->vertex->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
      const int b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
      struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

      if (!(nv30->vbo_user & (1 << b)))
         continue;

      if (!vb->stride) {
         nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }
      nv30_vbuf_range(nv30, b, &base, &size);

      if (!(written & (1 << b))) {
         written |= 1 << b;
         nouveau_user_buffer_upload(&nv30->base, buf, base, size);
      }

      offset = vb->buffer_offset + ve->src_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP, buf, offset,
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                 0, NV30_3D_VTXBUF_DMA1);
   }
   nv30->base.vbo_dirty = true;
}

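/* Release the temporary GPU-side storage that backed user vertex buffers
 * for the previous draw, along with the BUFCTX_VTXTMP references.
 */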
static inline void
nv30_release_user_vbufs(struct nv30_context *nv30)
{
   uint32_t vbo_user = nv30->vbo_user;

   while (vbo_user) {
      int i = ffs(vbo_user) - 1;
      vbo_user &= ~(1 << i);

      nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
   }

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
}

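/* Validate vertex array state: program the VTXFMT slots (padding any slots
 * left over from the previous configuration) and bind the VTXBUF pointers,
 * or force the inline push path when formats need CPU-side conversion.
 */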
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

#if UTIL_ARCH_BIG_ENDIAN
   if (1) { /* Figure out where the buffers are getting messed up */
#else
   if (unlikely(vertex->need_conversion)) {
#endif
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   if (redefine == 0)
      return;

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vb->stride) || nv30->vbo_fifo)
         PUSH_DATA (push, (vb->stride << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      bool user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer.resource);

      if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                 res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                 0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}

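/* Create the vertex elements CSO: resolve each element to a hardware vertex
 * format (falling back to float formats and flagging need_conversion when
 * unsupported) and build a translate key for the push path.
 */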
static void *
nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nv30_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;

   so = MALLOC(sizeof(*so) + sizeof(*so->element) * num_elements);
   if (!so)
      return NULL;
   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
   so->need_conversion = false;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; i++) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      enum pipe_format fmt = ve->src_format;

      so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
         so->need_conversion = true;
      }

      if (1) {
         unsigned j = transkey.nr_elements++;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += (util_format_get_stride(fmt, 1) + 3) & ~3;
      }
   }

   so->translate = translate_create(&transkey);
   so->vtx_size = transkey.output_stride / 4;
   so->vtx_per_packet_max = NV04_PFIFO_MAX_PACKET_LEN / MAX2(so->vtx_size, 1);
   return so;
}

static void
nv30_vertex_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}

static void
nv30_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_context *nv30 = nv30_context(pipe);

   nv30->vertex = hwcso;
   nv30->dirty |= NV30_NEW_VERTEX;
}

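/* Non-indexed draw: each VB_VERTEX_BATCH word encodes up to 256 vertices as
 * ((count - 1) << 24) | start, and each method carries at most 2047 such
 * words.
 */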
static void
nv30_draw_arrays(struct nv30_context *nv30,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned prim;

   prim = nv30_prim_gl(mode);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, prim);
   while (count) {
      const unsigned mpush = 2047 * 256;
      unsigned npush = (count > mpush) ? mpush : count;
      unsigned wpush = ((npush + 255) & ~255) >> 8;

      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), wpush);
      while (npush >= 256) {
         PUSH_DATA (push, 0xff000000 | start);
         start += 256;
         npush -= 256;
      }

      if (npush)
         PUSH_DATA (push, ((npush - 1) << 24) | start);
   }
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}

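/* Inline index submission: 8- and 16-bit indices are packed two per
 * VB_ELEMENT_U16 word (an odd leading index goes out via VB_ELEMENT_U32);
 * 32-bit indices use VB_ELEMENT_U32, or the _short variant when the caller
 * knows they all fit in 16 bits.
 */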
static void
nv30_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}

static void
nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

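/* Indexed draw: on NV40-class hardware with a GPU-resident index buffer,
 * bind it through IDXBUF_OFFSET/IDXBUF_FORMAT and emit VB_INDEX_BATCH
 * words; otherwise map the indices and push them inline.
 */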
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
                   const struct pipe_draw_info *info,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias,
                   unsigned index_size)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

   if (eng3d->oclass >= NV40_3D_CLASS && index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV40_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       !info->has_user_indices) {
      struct nv04_resource *res = nv04_resource(info->index.resource);
      unsigned offset = 0;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                 (index_size == 2) ? 0x00000010 : 0x00000000,
                 res->domain | NOUVEAU_BO_RD,
                 0, NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush = (count > mpush) ? mpush : count;
         unsigned wpush = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
      const void *data;
      if (!info->has_user_indices)
         data = nouveau_resource_map_offset(&nv30->base,
                                            nv04_resource(info->index.resource),
                                            0, NOUVEAU_BO_RD);
      else
         data = info->index.user;
      if (!data)
         return;

      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      switch (index_size) {
      case 1:
         nv30_draw_elements_inline_u08(push, data, start, count);
         break;
      case 2:
         nv30_draw_elements_inline_u16(push, data, start, count);
         break;
      case 4:
         if (shorten)
            nv30_draw_elements_inline_u32_short(push, data, start, count);
         else
            nv30_draw_elements_inline_u32(push, data, start, count);
         break;
      default:
         assert(0);
         return;
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   }
}

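/* Main draw entry point: splits multi-draws, chooses between hardware
 * vertex fetch, the inline push path and the software fallback, handles
 * vertex cache invalidation and primitive restart state, then submits the
 * draw.
 */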
static void
nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pipe, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   int i;

   if (!info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
      return;

   /* For picking only a few vertices from a large user buffer, push is better;
    * if the index count is larger and we expect repeated vertices, suggest
    * uploading instead.
    */
   nv30->vbo_push_hint = /* the 64 is heuristic */
      !(info->index_size &&
        info->index_bounds_valid &&
        ((info->max_index - info->min_index + 64) < draws[0].count));

   if (info->index_bounds_valid) {
      nv30->vbo_min_index = info->min_index;
      nv30->vbo_max_index = info->max_index;
   } else {
      nv30->vbo_min_index = 0;
      nv30->vbo_max_index = ~0;
   }

   if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
      nv30->dirty |= NV30_NEW_ARRAYS;

   push->user_priv = &nv30->bufctx;
   if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
      nv30_update_user_vbufs(nv30);

   nv30_state_validate(nv30, ~0, true);
   if (nv30->draw_flags) {
      nv30_render_vbo(pipe, info, drawid_offset, &draws[0]);
      return;
   } else
   if (nv30->vbo_fifo) {
      nv30_push_vbo(nv30, info, &draws[0]);
      return;
   }

   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
      if (!nv30->vtxbuf[i].buffer.resource)
         continue;
      if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
         nv30->base.vbo_dirty = true;
   }

   if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
       info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nv30->base.vbo_dirty = true;

   if (nv30->base.vbo_dirty) {
      BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
      PUSH_DATA (push, 0);
      nv30->base.vbo_dirty = false;
   }

   if (!info->index_size) {
      nv30_draw_arrays(nv30,
                       info->mode, draws[0].start, draws[0].count,
                       info->instance_count);
   } else {
      bool shorten = info->index_bounds_valid && info->max_index <= 65535;

      if (info->primitive_restart != nv30->state.prim_restart) {
         if (info->primitive_restart) {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
            PUSH_DATA (push, 1);
            PUSH_DATA (push, info->restart_index);

            if (info->restart_index > 65535)
               shorten = false;
         } else {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
            PUSH_DATA (push, 0);
         }
         nv30->state.prim_restart = info->primitive_restart;
      } else
      if (info->primitive_restart) {
         BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
         PUSH_DATA (push, info->restart_index);

         if (info->restart_index > 65535)
            shorten = false;
      }

      nv30_draw_elements(nv30, shorten, info,
                         info->mode, draws[0].start, draws[0].count,
                         info->instance_count, draws[0].index_bias, info->index_size);
   }

   nv30_state_release(nv30);
   nv30_release_user_vbufs(nv30);
}

void
nv30_vbo_init(struct pipe_context *pipe)
{
   pipe->create_vertex_elements_state = nv30_vertex_state_create;
   pipe->delete_vertex_elements_state = nv30_vertex_state_delete;
   pipe->bind_vertex_elements_state = nv30_vertex_state_bind;
   pipe->draw_vbo = nv30_draw_vbo;
}