/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_prim.h"
#include "translate/translate.h"

#include "nouveau_fence.h"
#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

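/* Unpack a single vertex element from its buffer and emit it as an
 * immediate VTX_ATTR_nF method, so no vertex array is needed for this
 * (constant / zero-stride) attribute. */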
static void
nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
                  struct pipe_vertex_element *ve, unsigned attr)
{
   const unsigned nc = util_format_get_nr_components(ve->src_format);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv04_resource *res = nv04_resource(vb->buffer.resource);
   const void *data;
   float v[4];

   data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset +
                                      ve->src_offset, NOUVEAU_BO_RD);

   util_format_unpack_rgba(ve->src_format, v, data, 1);

   switch (nc) {
   case 4:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      PUSH_DATAf(push, v[3]);
      break;
   case 3:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      break;
   case 2:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      break;
   case 1:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1);
      PUSH_DATAf(push, v[0]);
      break;
   default:
      assert(0);
      break;
   }
}

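/* Compute the byte range of vertex buffer 'vbi' touched by the current
 * draw's [vbo_min_index, vbo_max_index] vertex range. */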
static inline void
nv30_vbuf_range(struct nv30_context *nv30, int vbi,
                uint32_t *base, uint32_t *size)
{
   assert(nv30->vbo_max_index != ~0);
   *base = nv30->vbo_min_index * nv30->vtxbuf[vbi].stride;
   *size = (nv30->vbo_max_index -
            nv30->vbo_min_index + 1) * nv30->vtxbuf[vbi].stride;
}

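/* Decide how each bound vertex buffer will be sourced: fall back to the
 * FIFO push path (vbo_fifo) when hinted, upload the referenced range of
 * user memory to temporary GPU storage, or migrate ordinary buffers into
 * GART so the hardware can fetch them. */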
static void
nv30_prevalidate_vbufs(struct nv30_context *nv30)
{
   struct pipe_vertex_buffer *vb;
   struct nv04_resource *buf;
   int i;
   uint32_t base, size;

   nv30->vbo_fifo = nv30->vbo_user = 0;

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      vb = &nv30->vtxbuf[i];
      if (!vb->stride || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
         continue;
      buf = nv04_resource(vb->buffer.resource);

      /* NOTE: user buffers with temporary storage count as mapped by GPU */
      if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
         if (nv30->vbo_push_hint) {
            nv30->vbo_fifo = ~0;
            continue;
         } else {
            if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
               nv30->vbo_user |= 1 << i;
               assert(vb->stride > vb->buffer_offset);
               nv30_vbuf_range(nv30, i, &base, &size);
               nouveau_user_buffer_upload(&nv30->base, buf, base, size);
            } else {
               nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
            }
            nv30->base.vbo_dirty = true;
         }
      }
   }
}

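/* Re-upload the ranges of user vertex buffers referenced by the current
 * draw and point the hardware VTXBUF methods at the temporary copies. */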
static void
nv30_update_user_vbufs(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   uint32_t base, offset, size;
   int i;
   uint32_t written = 0;

   for (i = 0; i < nv30->vertex->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
      const int b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
      struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

      if (!(nv30->vbo_user & (1 << b)))
         continue;

      if (!vb->stride) {
         nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }
      nv30_vbuf_range(nv30, b, &base, &size);

      if (!(written & (1 << b))) {
         written |= 1 << b;
         nouveau_user_buffer_upload(&nv30->base, buf, base, size);
      }

      offset = vb->buffer_offset + ve->src_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP, buf, offset,
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                 0, NV30_3D_VTXBUF_DMA1);
   }
   nv30->base.vbo_dirty = true;
}

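/* Release the temporary GPU storage backing user vertex buffers and clear
 * the associated bufctx references once the draw has been submitted. */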
static inline void
nv30_release_user_vbufs(struct nv30_context *nv30)
{
   uint32_t vbo_user = nv30->vbo_user;

   while (vbo_user) {
      int i = ffs(vbo_user) - 1;
      vbo_user &= ~(1 << i);

      nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
   }

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
}

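/* Validate vertex state for hardware drawing: program the per-attribute
 * VTXFMT words (stride | format) and bind each vertex buffer via VTXBUF,
 * emitting zero-stride attributes as immediates instead.  Skipped entirely
 * when the fallback render path (draw_flags) is active. */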
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

#if UTIL_ARCH_BIG_ENDIAN
   if (1) { /* Figure out where the buffers are getting messed up */
#else
   if (unlikely(vertex->need_conversion)) {
#endif
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   if (redefine == 0)
      return;

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vb->stride) || nv30->vbo_fifo)
         PUSH_DATA (push, (vb->stride << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      bool user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer.resource);

      if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                 res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                 0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}

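/* Create a vertex element CSO.  Formats the hardware cannot fetch directly
 * are replaced with float equivalents (need_conversion), and a translate
 * object is built so vertices can be converted and submitted through the
 * FIFO push path when required. */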
static void *
nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nv30_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;

   so = MALLOC(sizeof(*so) + sizeof(*so->element) * num_elements);
   if (!so)
      return NULL;
   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
   so->need_conversion = false;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; i++) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      enum pipe_format fmt = ve->src_format;

      so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
         so->need_conversion = true;
      }

      if (1) {
         unsigned j = transkey.nr_elements++;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += (util_format_get_stride(fmt, 1) + 3) & ~3;
      }
   }

   so->translate = translate_create(&transkey);
   so->vtx_size = transkey.output_stride / 4;
   so->vtx_per_packet_max = NV04_PFIFO_MAX_PACKET_LEN / MAX2(so->vtx_size, 1);
   return so;
}

static void
nv30_vertex_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}

static void
nv30_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_context *nv30 = nv30_context(pipe);

   nv30->vertex = hwcso;
   nv30->dirty |= NV30_NEW_VERTEX;
}

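/* Submit a non-indexed draw.  Each VB_VERTEX_BATCH word encodes
 * ((count - 1) << 24) | start, so the range is split into batches of at
 * most 256 vertices, with at most 2047 batch words per method. */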
static void
nv30_draw_arrays(struct nv30_context *nv30,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned prim;

   prim = nv30_prim_gl(mode);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, prim);
   while (count) {
      const unsigned mpush = 2047 * 256;
      unsigned npush = (count > mpush) ? mpush : count;
      unsigned wpush = ((npush + 255) & ~255) >> 8;

      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), wpush);
      while (npush >= 256) {
         PUSH_DATA (push, 0xff000000 | start);
         start += 256;
         npush -= 256;
      }

      if (npush)
         PUSH_DATA (push, ((npush - 1) << 24) | start);
   }
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}

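/* Inline index submission: indices are written directly into the push
 * buffer.  8- and 16-bit indices are packed two per VB_ELEMENT_U16 word;
 * an odd leading index goes out through VB_ELEMENT_U32 first so the
 * remaining count is even. */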
static void
nv30_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}

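/* Same as the 32-bit path above, but the caller guarantees every index fits
 * in 16 bits, so pairs of indices can be packed per VB_ELEMENT_U16 word. */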
static void
nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

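/* Submit an indexed draw.  On NV40-class hardware with 16- or 32-bit
 * indices in a GPU-resident buffer, the hardware index buffer (IDXBUF) and
 * VB_INDEX_BATCH methods are used; otherwise the indices are copied inline
 * into the push buffer. */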
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
                   const struct pipe_draw_info *info,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias,
                   unsigned index_size)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

   if (eng3d->oclass >= NV40_3D_CLASS && index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV40_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       !info->has_user_indices) {
      struct nv04_resource *res = nv04_resource(info->index.resource);
      unsigned offset = 0;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                 (index_size == 2) ? 0x00000010 : 0x00000000,
                 res->domain | NOUVEAU_BO_RD,
                 0, NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush = (count > mpush) ? mpush : count;
         unsigned wpush = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
      const void *data;
      if (!info->has_user_indices)
         data = nouveau_resource_map_offset(&nv30->base,
                                            nv04_resource(info->index.resource),
                                            start * index_size, NOUVEAU_BO_RD);
      else
         data = info->index.user;
      if (!data)
         return;

      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      switch (index_size) {
      case 1:
         nv30_draw_elements_inline_u08(push, data, start, count);
         break;
      case 2:
         nv30_draw_elements_inline_u16(push, data, start, count);
         break;
      case 4:
         if (shorten)
            nv30_draw_elements_inline_u32_short(push, data, start, count);
         else
            nv30_draw_elements_inline_u32(push, data, start, count);
         break;
      default:
         assert(0);
         return;
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   }
}

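/* Top-level draw entry point: choose between the fallback render path
 * (draw_flags), the FIFO vertex push path (vbo_fifo) and hardware vertex
 * buffers, invalidate the vertex cache for coherent buffers, then dispatch
 * to the arrays or elements path. */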
static void
nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   int i;

   if (!info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
      return;

   /* For picking only a few vertices from a large user buffer, push is
    * better; if the index count is large and we expect repeated vertices,
    * suggest an upload instead.
    */
   nv30->vbo_push_hint = /* the 64 is heuristic */
      !(info->index_size &&
        ((info->max_index - info->min_index + 64) < info->count));

   nv30->vbo_min_index = info->min_index;
   nv30->vbo_max_index = info->max_index;

   if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
      nv30->dirty |= NV30_NEW_ARRAYS;

   push->user_priv = &nv30->bufctx;
   if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
      nv30_update_user_vbufs(nv30);

   nv30_state_validate(nv30, ~0, true);
   if (nv30->draw_flags) {
      nv30_render_vbo(pipe, info);
      return;
   } else
   if (nv30->vbo_fifo) {
      nv30_push_vbo(nv30, info);
      return;
   }

   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
      if (!nv30->vtxbuf[i].buffer.resource)
         continue;
      if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
         nv30->base.vbo_dirty = true;
   }

   if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
       info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nv30->base.vbo_dirty = true;

   if (nv30->base.vbo_dirty) {
      BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
      PUSH_DATA (push, 0);
      nv30->base.vbo_dirty = false;
   }

   if (!info->index_size) {
      nv30_draw_arrays(nv30,
                       info->mode, info->start, info->count,
                       info->instance_count);
   } else {
      bool shorten = info->max_index <= 65535;

      if (info->primitive_restart != nv30->state.prim_restart) {
         if (info->primitive_restart) {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
            PUSH_DATA (push, 1);
            PUSH_DATA (push, info->restart_index);

            if (info->restart_index > 65535)
               shorten = false;
         } else {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
            PUSH_DATA (push, 0);
         }
         nv30->state.prim_restart = info->primitive_restart;
      } else
      if (info->primitive_restart) {
         BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
         PUSH_DATA (push, info->restart_index);

         if (info->restart_index > 65535)
            shorten = false;
      }

      nv30_draw_elements(nv30, shorten, info,
                         info->mode, info->start, info->count,
                         info->instance_count, info->index_bias, info->index_size);
   }

   nv30_state_release(nv30);
   nv30_release_user_vbufs(nv30);
}

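/* Hook the vertex element CSO and draw entry points into the context. */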
void
nv30_vbo_init(struct pipe_context *pipe)
{
   pipe->create_vertex_elements_state = nv30_vertex_state_create;
   pipe->delete_vertex_elements_state = nv30_vertex_state_delete;
   pipe->bind_vertex_elements_state = nv30_vertex_state_bind;
   pipe->draw_vbo = nv30_draw_vbo;
}