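/* CPU fallback ("push") vertex submission path: the translate module repacks
 * the application's vertex data into a scratch buffer and the draw is emitted
 * as inline index/array ranges through the pushbuf.  This is used when the
 * hardware cannot fetch the bound vertex formats directly (e.g. FIXED or
 * DOUBLE inputs, see nvc0_push_vbo_indirect below).
 */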
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"

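/* Per-draw state for the push path: the translate instance and its output
 * destination, the (optionally mapped) index buffer, and bookkeeping for
 * primitive restart and edge-flag splitting.
 */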
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;
   const void *idxbuf;

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *,
                                        const struct pipe_draw_start_count_bias *draw);

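/* Fill in the per-draw push_context from the current nvc0 state. */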
static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}

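/* Point the translate instance at the (mapped) source vertex buffers.
 * Non-instanced buffers are pre-biased by index_bias so translate can be run
 * on the raw element values.
 */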
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(vb->is_user_buffer))
         map = (const uint8_t *)vb->buffer.user;
      else {
         if (!vb->buffer.resource)
            continue;

         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
      }

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}

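/* Get a CPU-visible pointer to the index data, mapping the resource if the
 * indices are not a user pointer.
 */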
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
                     const struct pipe_draw_info *info)
{
   if (!info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);
      ctx->idxbuf = nouveau_resource_map_offset(
            &nvc0->base, buf, 0, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = info->index.user;
   }
}

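/* Map the vertex buffer feeding the edge-flag attribute so the CPU can
 * inspect per-vertex edge flags while splitting the draw.
 */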
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (!vb->is_user_buffer) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}

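/* Return the length of the leading run of indices that are not equal to the
 * primitive restart index.
 */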
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

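/* Read the edge flag stored for a vertex (8-bit or 32-bit source) and reduce
 * it to a bool; ef_toggle flips the currently programmed value.
 */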
static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

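/* Count how many of the next vertices keep the current edge-flag value; the
 * draw is split at the first vertex whose flag differs so that EDGEFLAG can
 * be toggled in the command stream.
 */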
static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}

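/* Allocate scratch space for the translated vertices and point vertex array 0
 * at it (START and LIMIT), returning the CPU destination pointer.
 */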
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);

   if (nvc0->screen->eng3d->oclass < TU102_3D_CLASS)
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   else
      BEGIN_NVC0(push, SUBC_3D(TU102_3D_VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}

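/* The disp_vertices_* functions translate a range of vertices into the
 * scratch array and emit the corresponding draw ranges.  Runs are split at
 * primitive restart indices (re-emitted as VB_ELEMENT_U32 = ~0) and wherever
 * the edge flag changes; _i08/_i16/_i32 walk an index buffer of the given
 * width, _seq handles non-indexed draws.
 */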
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    *  maybe we should avoid that ?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}

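/* Map a PIPE_PRIM_* primitive type to the hardware VERTEX_BEGIN_GL value. */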
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}

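/* Layouts of the GL indirect draw commands as stored in the indirect buffer. */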
typedef struct {
   uint32_t count;
   uint32_t primCount;
   uint32_t first;
   uint32_t baseInstance;
} DrawArraysIndirectCommand;

typedef struct {
   uint32_t count;
   uint32_t primCount;
   uint32_t firstIndex;
   int32_t  baseVertex;
   uint32_t baseInstance;
} DrawElementsIndirectCommand;

void
nvc0_push_vbo_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info,
                       unsigned drawid_offset,
                       const struct pipe_draw_indirect_info *indirect,
                       const struct pipe_draw_start_count_bias *draw)
{
   /* The strategy here is to just read the commands from the indirect buffer
    * and do the draws. This is suboptimal, but will only happen in the case
    * that conversion is required for FIXED or DOUBLE inputs.
    */
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv04_resource *buf = nv04_resource(indirect->buffer);
   struct nv04_resource *buf_count = nv04_resource(indirect->indirect_draw_count);
   unsigned i;

   unsigned draw_count = indirect->draw_count;
   if (buf_count) {
      uint32_t *count = nouveau_resource_map_offset(
            &nvc0->base, buf_count, indirect->indirect_draw_count_offset,
            NOUVEAU_BO_RD);
      draw_count = *count;
   }

   uint8_t *buf_data = nouveau_resource_map_offset(
            &nvc0->base, buf, indirect->offset, NOUVEAU_BO_RD);
   struct pipe_draw_info single = *info;
   struct pipe_draw_start_count_bias sdraw = *draw;
   for (i = 0; i < draw_count; i++, buf_data += indirect->stride) {
      if (info->index_size) {
         DrawElementsIndirectCommand *cmd = (void *)buf_data;
         sdraw.start = draw->start + cmd->firstIndex;
         sdraw.count = cmd->count;
         single.start_instance = cmd->baseInstance;
         single.instance_count = cmd->primCount;
         sdraw.index_bias = cmd->baseVertex;
      } else {
         DrawArraysIndirectCommand *cmd = (void *)buf_data;
         sdraw.start = cmd->first;
         sdraw.count = cmd->count;
         single.start_instance = cmd->baseInstance;
         single.instance_count = cmd->primCount;
      }

      if (nvc0->vertprog->vp.need_draw_parameters) {
         PUSH_SPACE(push, 9);
         BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
         PUSH_DATA (push, NVC0_CB_AUX_SIZE);
         PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
         PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
         BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 3);
         PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
         PUSH_DATA (push, sdraw.index_bias);
         PUSH_DATA (push, single.start_instance);
         PUSH_DATA (push, drawid_offset + i);
      }

      nvc0_push_vbo(nvc0, &single, NULL, &sdraw);
   }

   nouveau_resource_unmap(buf);
   if (buf_count)
      nouveau_resource_unmap(buf_count);
}

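/* Main entry point of the push path: configure translate and the scratch
 * vertex array, then emit one VERTEX_BEGIN_GL/VERTEX_END_GL pair per
 * instance, dispatching to the disp_vertices_* variant matching the index
 * size.
 */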
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draw)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned index_bias = info->index_size ? draw->index_bias : 0;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = draw->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->index_size) {
      nvc0_push_map_idxbuf(&ctx, nvc0, info);
      index_size = info->index_size;
   } else {
      if (unlikely(indirect && indirect->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(indirect->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info, draw);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, draw->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, draw->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, draw->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, draw->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->index_size && !info->has_user_indices)
      nouveau_resource_unmap(nv04_resource(info->index.resource));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

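/* Widen indices to 32 bits while applying the base-vertex bias. */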
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

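/* Upload the original (pre-translation) vertex indices to a scratch buffer,
 * bind them as an extra vertex attribute, and program VERTEX_ID_REPLACE so
 * the vertex shader sees the expected vertex IDs.
 */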
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info,
                            const struct pipe_draw_start_count_bias *draw)

{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = info->index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || draw->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          draw->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->index_size) {
      if (!draw->index_bias) {
         memcpy(data, ctx->idxbuf, draw->count * index_size);
      } else {
         switch (info->index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, draw->index_bias, draw->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, draw->index_bias, draw->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, draw->index_bias, draw->count);
            break;
         }
      }
   } else {
      for (i = 0; i < draw->count; ++i)
         data[i] = i + (draw->start + draw->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);

   if (nvc0->screen->eng3d->oclass < TU102_3D_CLASS)
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   else
      BEGIN_NVC0(push, SUBC_3D(TU102_3D_VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + draw->count * index_size - 1);
   PUSH_DATA (push, va + draw->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}
800