/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_prim.h"
#include "translate/translate.h"

#include "nouveau_fence.h"
#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

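/* Emit a single constant vertex attribute: map the element's source data,
 * unpack it to floats and push it through the VTX_ATTR_nF methods.  Used
 * for zero-stride (per-draw constant) vertex buffers.
 */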
static void
nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
                  struct pipe_vertex_element *ve, unsigned attr)
{
   const unsigned nc = util_format_get_nr_components(ve->src_format);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv04_resource *res = nv04_resource(vb->buffer.resource);
   const struct util_format_description *desc =
      util_format_description(ve->src_format);
   const void *data;
   float v[4];

   data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset +
                                      ve->src_offset, NOUVEAU_BO_RD);

   desc->unpack_rgba_float(v, 0, data, 0, 1, 1);

   switch (nc) {
   case 4:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      PUSH_DATAf(push, v[3]);
      break;
   case 3:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      break;
   case 2:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      break;
   case 1:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1);
      PUSH_DATAf(push, v[0]);
      break;
   default:
      assert(0);
      break;
   }
}

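/* Compute the byte range [*base, *base + *size) of vertex buffer vbi that
 * the current draw touches, based on the cached min/max vertex indices.
 */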
static inline void
nv30_vbuf_range(struct nv30_context *nv30, int vbi,
                uint32_t *base, uint32_t *size)
{
   assert(nv30->vbo_max_index != ~0);
   *base = nv30->vbo_min_index * nv30->vtxbuf[vbi].stride;
   *size = (nv30->vbo_max_index -
            nv30->vbo_min_index + 1) * nv30->vtxbuf[vbi].stride;
}

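/* Decide how each bound vertex buffer is accessed for this draw: buffers
 * already visible to the GPU are left alone, user buffers are uploaded to
 * temporary storage, other unmapped buffers are migrated to GART, and if
 * the push hint is set all vertex data goes through the FIFO (vbo_fifo)
 * path instead.
 */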
static void
nv30_prevalidate_vbufs(struct nv30_context *nv30)
{
   struct pipe_vertex_buffer *vb;
   struct nv04_resource *buf;
   int i;
   uint32_t base, size;

   nv30->vbo_fifo = nv30->vbo_user = 0;

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      vb = &nv30->vtxbuf[i];
      if (!vb->stride || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
         continue;
      buf = nv04_resource(vb->buffer.resource);

      /* NOTE: user buffers with temporary storage count as mapped by GPU */
      if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
         if (nv30->vbo_push_hint) {
            nv30->vbo_fifo = ~0;
            continue;
         } else {
            if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
               nv30->vbo_user |= 1 << i;
               assert(vb->stride > vb->buffer_offset);
               nv30_vbuf_range(nv30, i, &base, &size);
               nouveau_user_buffer_upload(&nv30->base, buf, base, size);
            } else {
               nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
            }
            nv30->base.vbo_dirty = true;
         }
      }
   }
}

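/* Re-upload user vertex buffers for the current draw and point the hardware
 * vertex buffer slots at the temporary GPU copies.  Zero-stride elements are
 * emitted as constant attributes instead.
 */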
static void
nv30_update_user_vbufs(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   uint32_t base, offset, size;
   int i;
   uint32_t written = 0;

   for (i = 0; i < nv30->vertex->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
      const int b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
      struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

      if (!(nv30->vbo_user & (1 << b)))
         continue;

      if (!vb->stride) {
         nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }
      nv30_vbuf_range(nv30, b, &base, &size);

      if (!(written & (1 << b))) {
         written |= 1 << b;
         nouveau_user_buffer_upload(&nv30->base, buf, base, size);
      }

      offset = vb->buffer_offset + ve->src_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP, buf, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }
   nv30->base.vbo_dirty = true;
}

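/* Drop the temporary GPU storage of user vertex buffers after the draw and
 * clear the associated bufctx references.
 */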
static inline void
nv30_release_user_vbufs(struct nv30_context *nv30)
{
   uint32_t vbo_user = nv30->vbo_user;

   while (vbo_user) {
      int i = ffs(vbo_user) - 1;
      vbo_user &= ~(1 << i);

      nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
   }

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
}

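/* Validate vertex state: program a VTXFMT entry for every vertex element
 * (clearing any stale trailing slots) and bind the corresponding vertex
 * buffer ranges.  Falls back to the FIFO push path when format conversion
 * is required.
 */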
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

#ifdef PIPE_ARCH_BIG_ENDIAN
   if (1) { /* Figure out where the buffers are getting messed up */
#else
   if (unlikely(vertex->need_conversion)) {
#endif
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   if (redefine == 0)
      return;

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vb->stride) || nv30->vbo_fifo)
         PUSH_DATA (push, (vb->stride << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      bool user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer.resource);

      if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                       res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}

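/* Create a vertex elements CSO.  Formats without a native hardware encoding
 * are marked for conversion to float, and a translate key is built so the
 * push path can convert and interleave vertices on the CPU.
 */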
static void *
nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nv30_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;

   so = MALLOC(sizeof(*so) + sizeof(*so->element) * num_elements);
   if (!so)
      return NULL;
   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
   so->need_conversion = false;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; i++) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      enum pipe_format fmt = ve->src_format;

      so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
         so->need_conversion = true;
      }

      if (1) {
         unsigned j = transkey.nr_elements++;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += (util_format_get_stride(fmt, 1) + 3) & ~3;
      }
   }

   so->translate = translate_create(&transkey);
   so->vtx_size = transkey.output_stride / 4;
   so->vtx_per_packet_max = NV04_PFIFO_MAX_PACKET_LEN / MAX2(so->vtx_size, 1);
   return so;
}

static void
nv30_vertex_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}

static void
nv30_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_context *nv30 = nv30_context(pipe);

   nv30->vertex = hwcso;
   nv30->dirty |= NV30_NEW_VERTEX;
}

static void
nv30_draw_arrays(struct nv30_context *nv30,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned prim;

   prim = nv30_prim_gl(mode);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, prim);
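   /* Each VB_VERTEX_BATCH word encodes a run as ((count - 1) << 24) | start,
    * i.e. at most 256 vertices per word, with up to 2047 words per method.
    */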
   while (count) {
      const unsigned mpush = 2047 * 256;
      unsigned npush = (count > mpush) ? mpush : count;
      unsigned wpush = ((npush + 255) & ~255) >> 8;

      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), wpush);
      while (npush >= 256) {
         PUSH_DATA (push, 0xff000000 | start);
         start += 256;
         npush -= 256;
      }

      if (npush)
         PUSH_DATA (push, ((npush - 1) << 24) | start);
   }
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}

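/* Inline index push helpers: 8- and 16-bit indices are packed two per word
 * into VB_ELEMENT_U16 (with a leading VB_ELEMENT_U32 word when the count is
 * odd), 32-bit indices go out via VB_ELEMENT_U32, and the "_short" variant
 * repacks 32-bit indices that are known to fit in 16 bits.
 */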
static void
nv30_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}

static void
nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

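/* Indexed draw: on NV40-class hardware with 16/32-bit indices in a
 * GPU-resident buffer the indices are fetched via IDXBUF and VB_INDEX_BATCH;
 * otherwise they are read on the CPU and pushed inline.
 */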
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
                   const struct pipe_draw_info *info,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias,
                   unsigned index_size)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

   if (eng3d->oclass >= NV40_3D_CLASS && index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV40_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       !info->has_user_indices) {
      struct nv04_resource *res = nv04_resource(info->index.resource);
      unsigned offset = 0;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                       (index_size == 2) ? 0x00000010 : 0x00000000,
                       res->domain | NOUVEAU_BO_RD,
                       0, NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush = (count > mpush) ? mpush : count;
         unsigned wpush = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
      const void *data;
      if (!info->has_user_indices)
         data = nouveau_resource_map_offset(&nv30->base,
                                            nv04_resource(info->index.resource),
                                            start * index_size, NOUVEAU_BO_RD);
      else
         data = info->index.user;
      if (!data)
         return;

      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      switch (index_size) {
      case 1:
         nv30_draw_elements_inline_u08(push, data, start, count);
         break;
      case 2:
         nv30_draw_elements_inline_u16(push, data, start, count);
         break;
      case 4:
         if (shorten)
            nv30_draw_elements_inline_u32_short(push, data, start, count);
         else
            nv30_draw_elements_inline_u32(push, data, start, count);
         break;
      default:
         assert(0);
         return;
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   }
}

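/* Top-level draw entry point: falls back to nv30_render_vbo() when
 * draw_flags is set and to the CPU push path (nv30_push_vbo) when vbo_fifo
 * is set, otherwise draws from hardware vertex buffers, handling vertex
 * cache invalidation and primitive restart state along the way.
 */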
static void
nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   int i;

   if (!info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
      return;

   /* For picking only a few vertices from a large user buffer, pushing is
    * better; if the index count is larger and we expect repeated vertices,
    * suggest uploading instead.
    */
   nv30->vbo_push_hint = /* the 64 is heuristic */
      !(info->index_size &&
        ((info->max_index - info->min_index + 64) < info->count));

   nv30->vbo_min_index = info->min_index;
   nv30->vbo_max_index = info->max_index;

   if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
      nv30->dirty |= NV30_NEW_ARRAYS;

   push->user_priv = &nv30->bufctx;
   if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
      nv30_update_user_vbufs(nv30);

   nv30_state_validate(nv30, ~0, true);
   if (nv30->draw_flags) {
      nv30_render_vbo(pipe, info);
      return;
   } else
   if (nv30->vbo_fifo) {
      nv30_push_vbo(nv30, info);
      return;
   }

   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
      if (!nv30->vtxbuf[i].buffer.resource)
         continue;
      if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
         nv30->base.vbo_dirty = true;
   }

   if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
       info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nv30->base.vbo_dirty = true;

   if (nv30->base.vbo_dirty) {
      BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
      PUSH_DATA (push, 0);
      nv30->base.vbo_dirty = false;
   }

   if (!info->index_size) {
      nv30_draw_arrays(nv30,
                       info->mode, info->start, info->count,
                       info->instance_count);
   } else {
      bool shorten = info->max_index <= 65535;

      if (info->primitive_restart != nv30->state.prim_restart) {
         if (info->primitive_restart) {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
            PUSH_DATA (push, 1);
            PUSH_DATA (push, info->restart_index);

            if (info->restart_index > 65535)
               shorten = false;
         } else {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
            PUSH_DATA (push, 0);
         }
         nv30->state.prim_restart = info->primitive_restart;
      } else
      if (info->primitive_restart) {
         BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
         PUSH_DATA (push, info->restart_index);

         if (info->restart_index > 65535)
            shorten = false;
      }

      nv30_draw_elements(nv30, shorten, info,
                         info->mode, info->start, info->count,
                         info->instance_count, info->index_bias, info->index_size);
   }

   nv30_state_release(nv30);
   nv30_release_user_vbufs(nv30);
}

void
nv30_vbo_init(struct pipe_context *pipe)
{
   pipe->create_vertex_elements_state = nv30_vertex_state_create;
   pipe->delete_vertex_elements_state = nv30_vertex_state_delete;
   pipe->bind_vertex_elements_state = nv30_vertex_state_bind;
   pipe->draw_vbo = nv30_draw_vbo;
}