1 /**************************************************************************
2  *
3  * Copyright 2007 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28  /**
29   * @file
30   *
31   * Wrap the cso cache & hash mechanisms in a simplified
32   * pipe-driver-specific interface.
33   *
34   * @author Zack Rusin <zackr@vmware.com>
35   * @author Keith Whitwell <keithw@vmware.com>
36   */
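
/*
 * Typical lifecycle, as a rough sketch (error handling omitted; "pipe" is
 * assumed to be the state tracker's pipe_context):
 *
 *    struct cso_context *cso = cso_create_context(pipe, 0);
 *
 *    struct pipe_blend_state blend = {0};
 *    blend.rt[0].colormask = PIPE_MASK_RGBA;
 *    cso_set_blend(cso, &blend);
 *
 *    ... more cso_set_*() calls and draw calls ...
 *
 *    cso_destroy_context(cso);
 *
 * cso_set_*() calls cache the created driver objects and re-bind them only
 * when the incoming template differs from what is currently bound;
 * cso_destroy_context() unbinds everything and frees the cache.
 */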
37 
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_helpers.h"
42 #include "util/u_inlines.h"
43 #include "util/u_math.h"
44 #include "util/u_memory.h"
45 #include "util/u_vbuf.h"
46 #include "tgsi/tgsi_parse.h"
47 
48 #include "cso_cache/cso_context.h"
49 #include "cso_cache/cso_cache.h"
50 #include "cso_cache/cso_hash.h"
51 #include "cso_context.h"
52 #include "driver_trace/tr_dump.h"
53 
54 /**
55  * Per-shader sampler information.
56  */
57 struct sampler_info
58 {
59    struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
60    void *samplers[PIPE_MAX_SAMPLERS];
61 };
62 
63 
64 
65 struct cso_context {
66    struct pipe_context *pipe;
67 
68    struct u_vbuf *vbuf;
69    struct u_vbuf *vbuf_current;
70    bool always_use_vbuf;
71    bool sampler_format;
72 
73    boolean has_geometry_shader;
74    boolean has_tessellation;
75    boolean has_compute_shader;
76    boolean has_streamout;
77 
78    uint32_t max_fs_samplerviews : 16;
79 
80    unsigned saved_state;  /**< bitmask of CSO_BIT_x flags */
81    unsigned saved_compute_state;  /**< bitmask of CSO_BIT_COMPUTE_x flags */
82 
83    struct sampler_info fragment_samplers_saved;
84    struct sampler_info compute_samplers_saved;
85    struct sampler_info samplers[PIPE_SHADER_TYPES];
86 
87    /* Temporary number until cso_single_sampler_done is called.
88     * It tracks the highest sampler seen in cso_single_sampler.
89     */
90    int max_sampler_seen;
91 
92    unsigned nr_so_targets;
93    struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
94 
95    unsigned nr_so_targets_saved;
96    struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];
97 
98    /** Current and saved state.
99     * The saved state is used as a 1-deep stack.
100     */
101    void *blend, *blend_saved;
102    void *depth_stencil, *depth_stencil_saved;
103    void *rasterizer, *rasterizer_saved;
104    void *fragment_shader, *fragment_shader_saved;
105    void *vertex_shader, *vertex_shader_saved;
106    void *geometry_shader, *geometry_shader_saved;
107    void *tessctrl_shader, *tessctrl_shader_saved;
108    void *tesseval_shader, *tesseval_shader_saved;
109    void *compute_shader, *compute_shader_saved;
110    void *velements, *velements_saved;
111    struct pipe_query *render_condition, *render_condition_saved;
112    uint render_condition_mode, render_condition_mode_saved;
113    boolean render_condition_cond, render_condition_cond_saved;
114    bool flatshade_first, flatshade_first_saved;
115 
116    struct pipe_framebuffer_state fb, fb_saved;
117    struct pipe_viewport_state vp, vp_saved;
118    unsigned sample_mask, sample_mask_saved;
119    unsigned min_samples, min_samples_saved;
120    struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
121 
122    /* This should be last to keep all of the above together in memory. */
123    struct cso_cache cache;
124 };
125 
126 struct pipe_context *cso_get_pipe_context(struct cso_context *cso)
127 {
128    return cso->pipe;
129 }
130 
131 static inline boolean delete_cso(struct cso_context *ctx,
132                                  void *state, enum cso_cache_type type)
133 {
134    switch (type) {
135    case CSO_BLEND:
136       if (ctx->blend == ((struct cso_blend*)state)->data)
137          return false;
138       break;
139    case CSO_DEPTH_STENCIL_ALPHA:
140       if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data)
141          return false;
142       break;
143    case CSO_RASTERIZER:
144       if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data)
145          return false;
146       break;
147    case CSO_VELEMENTS:
148       if (ctx->velements == ((struct cso_velements*)state)->data)
149          return false;
150       break;
151    case CSO_SAMPLER:
152       /* nothing to do for samplers */
153       break;
154    default:
155       assert(0);
156    }
157 
158    cso_delete_state(ctx->pipe, state, type);
159    return true;
160 }
161 
162 static inline void
163 sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
164               int max_size, void *user_data)
165 {
166    struct cso_context *ctx = (struct cso_context *)user_data;
167    /* If we're approaching the maximum size, remove a fourth of the entries;
168     * otherwise every subsequent call will go through the same process. */
169    int hash_size = cso_hash_size(hash);
170    int max_entries = (max_size > hash_size) ? max_size : hash_size;
171    int to_remove =  (max_size < max_entries) * max_entries/4;
172    struct cso_hash_iter iter;
173    struct cso_sampler **samplers_to_restore = NULL;
174    unsigned to_restore = 0;
175 
176    if (hash_size > max_size)
177       to_remove += hash_size - max_size;
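   /* Worked example (illustrative numbers): with max_size = 100 and
    * hash_size = 130, max_entries is 130, so to_remove starts at
    * 130/4 = 32 and the overflow adds 130 - 100 = 30, evicting 62
    * entries in total.
    */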
178 
179    if (to_remove == 0)
180       return;
181 
182    if (type == CSO_SAMPLER) {
183       int i, j;
184 
185       samplers_to_restore = MALLOC(PIPE_SHADER_TYPES * PIPE_MAX_SAMPLERS *
186                                    sizeof(*samplers_to_restore));
187 
188       /* Temporarily remove currently bound sampler states from the hash
189        * table, to prevent them from being deleted
190        */
191       for (i = 0; i < PIPE_SHADER_TYPES; i++) {
192          for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
193             struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];
194 
195             if (sampler && cso_hash_take(hash, sampler->hash_key))
196                samplers_to_restore[to_restore++] = sampler;
197          }
198       }
199    }
200 
201    iter = cso_hash_first_node(hash);
202    while (to_remove) {
203       /* remove elements until we're good */
204       /* FIXME: currently we pick the nodes to remove at random */
205       void *cso = cso_hash_iter_data(iter);
206 
207       if (!cso)
208          break;
209 
210       if (delete_cso(ctx, cso, type)) {
211          iter = cso_hash_erase(hash, iter);
212          --to_remove;
213       } else
214          iter = cso_hash_iter_next(iter);
215    }
216 
217    if (type == CSO_SAMPLER) {
218       /* Put currently bound sampler states back into the hash table */
219       while (to_restore--) {
220          struct cso_sampler *sampler = samplers_to_restore[to_restore];
221 
222          cso_hash_insert(hash, sampler->hash_key, sampler);
223       }
224 
225       FREE(samplers_to_restore);
226    }
227 }
228 
229 static void cso_init_vbuf(struct cso_context *cso, unsigned flags)
230 {
231    struct u_vbuf_caps caps;
232    bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
233    bool needs64b = !(flags & CSO_NO_64B_VERTEX_BUFFERS);
234 
235    u_vbuf_get_caps(cso->pipe->screen, &caps, needs64b);
236 
237    /* Enable u_vbuf if needed. */
238    if (caps.fallback_always ||
239        (uses_user_vertex_buffers &&
240         caps.fallback_only_for_user_vbuffers)) {
241       cso->vbuf = u_vbuf_create(cso->pipe, &caps);
242       cso->vbuf_current = cso->vbuf;
243       cso->always_use_vbuf = caps.fallback_always;
244    }
245 }
246 
247 struct cso_context *
248 cso_create_context(struct pipe_context *pipe, unsigned flags)
249 {
250    struct cso_context *ctx = CALLOC_STRUCT(cso_context);
251    if (!ctx)
252       return NULL;
253 
254    cso_cache_init(&ctx->cache, pipe);
255    cso_cache_set_sanitize_callback(&ctx->cache, sanitize_hash, ctx);
256 
257    ctx->pipe = pipe;
258    ctx->sample_mask = ~0;
259 
260    if (!(flags & CSO_NO_VBUF))
261       cso_init_vbuf(ctx, flags);
262 
263    /* Enable for testing: */
264    if (0) cso_set_maximum_cache_size(&ctx->cache, 4);
265 
266    if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
267                                 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
268       ctx->has_geometry_shader = TRUE;
269    }
270    if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
271                                 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
272       ctx->has_tessellation = TRUE;
273    }
274    if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
275                                       PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
276       int supported_irs =
277          pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
278                                         PIPE_SHADER_CAP_SUPPORTED_IRS);
279       if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
280                            (1 << PIPE_SHADER_IR_NIR))) {
281          ctx->has_compute_shader = TRUE;
282       }
283    }
284    if (pipe->screen->get_param(pipe->screen,
285                                PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
286       ctx->has_streamout = TRUE;
287    }
288 
289    if (pipe->screen->get_param(pipe->screen, PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
290        PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO)
291       ctx->sampler_format = true;
292 
293    ctx->max_fs_samplerviews = pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_FRAGMENT,
294                                                              PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
295 
296    ctx->max_sampler_seen = -1;
297    return ctx;
298 }
299 
300 void cso_unbind_context(struct cso_context *ctx)
301 {
302    unsigned i;
303 
304    bool dumping = trace_dumping_enabled_locked();
305    if (dumping)
306       trace_dumping_stop_locked();
307    if (ctx->pipe) {
308       ctx->pipe->bind_blend_state( ctx->pipe, NULL );
309       ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
310 
311       {
312          static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
313          static struct pipe_shader_buffer ssbos[PIPE_MAX_SHADER_BUFFERS] = { 0 };
314          static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
315          struct pipe_screen *scr = ctx->pipe->screen;
316          enum pipe_shader_type sh;
317          for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
318             switch (sh) {
319             case PIPE_SHADER_GEOMETRY:
320                if (!ctx->has_geometry_shader)
321                   continue;
322                break;
323             case PIPE_SHADER_TESS_CTRL:
324             case PIPE_SHADER_TESS_EVAL:
325                if (!ctx->has_tessellation)
326                   continue;
327                break;
328             case PIPE_SHADER_COMPUTE:
329                if (!ctx->has_compute_shader)
330                   continue;
331                break;
332             default:
333                break;
334             }
335 
336             int maxsam = scr->get_shader_param(scr, sh,
337                                                PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
338             int maxview = scr->get_shader_param(scr, sh,
339                                                 PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
340             int maxssbo = scr->get_shader_param(scr, sh,
341                                                 PIPE_SHADER_CAP_MAX_SHADER_BUFFERS);
342             int maxcb = scr->get_shader_param(scr, sh,
343                                               PIPE_SHADER_CAP_MAX_CONST_BUFFERS);
344             int maximg = scr->get_shader_param(scr, sh,
345                                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
346             assert(maxsam <= PIPE_MAX_SAMPLERS);
347             assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
348             assert(maxssbo <= PIPE_MAX_SHADER_BUFFERS);
349             assert(maxcb <= PIPE_MAX_CONSTANT_BUFFERS);
350             assert(maximg <= PIPE_MAX_SHADER_IMAGES);
351             if (maxsam > 0) {
352                ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
353             }
354             if (maxview > 0) {
355                ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, 0, false, views);
356             }
357             if (maxssbo > 0) {
358                ctx->pipe->set_shader_buffers(ctx->pipe, sh, 0, maxssbo, ssbos, 0);
359             }
360             if (maximg > 0) {
361                ctx->pipe->set_shader_images(ctx->pipe, sh, 0, 0, maximg, NULL);
362             }
363             for (int i = 0; i < maxcb; i++) {
364                ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, false, NULL);
365             }
366          }
367       }
368 
369       ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
370       struct pipe_stencil_ref sr = {0};
371       ctx->pipe->set_stencil_ref(ctx->pipe, sr);
372       ctx->pipe->bind_fs_state( ctx->pipe, NULL );
373       ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
374       ctx->pipe->bind_vs_state( ctx->pipe, NULL );
375       ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
376       if (ctx->has_geometry_shader) {
377          ctx->pipe->bind_gs_state(ctx->pipe, NULL);
378       }
379       if (ctx->has_tessellation) {
380          ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
381          ctx->pipe->bind_tes_state(ctx->pipe, NULL);
382       }
383       if (ctx->has_compute_shader) {
384          ctx->pipe->bind_compute_state(ctx->pipe, NULL);
385       }
386       ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );
387 
388       if (ctx->has_streamout)
389          ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
390    }
391 
392    util_unreference_framebuffer_state(&ctx->fb);
393    util_unreference_framebuffer_state(&ctx->fb_saved);
394 
395    for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
396       pipe_so_target_reference(&ctx->so_targets[i], NULL);
397       pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
398    }
399 
400    memset(&ctx->samplers, 0, sizeof(ctx->samplers));
401    memset(&ctx->nr_so_targets, 0, offsetof(struct cso_context, cache) - offsetof(struct cso_context, nr_so_targets));
402    ctx->sample_mask = ~0;
403    /*
404     * If the cso context is reused (with the same pipe context), we need
405     * to make sure the context state doesn't get out of sync.
406     */
407    ctx->pipe->set_sample_mask(ctx->pipe, ctx->sample_mask);
408    if (ctx->pipe->set_min_samples)
409       ctx->pipe->set_min_samples(ctx->pipe, ctx->min_samples);
410    if (dumping)
411       trace_dumping_start_locked();
412 }
413 
414 /**
415  * Free the CSO context.
416  */
417 void cso_destroy_context( struct cso_context *ctx )
418 {
419    cso_unbind_context(ctx);
420    cso_cache_delete(&ctx->cache);
421 
422    if (ctx->vbuf)
423       u_vbuf_destroy(ctx->vbuf);
424    FREE( ctx );
425 }
426 
427 
428  /* These functions either find the state for the given template
429  * in the cache, or create a new state from the given template,
430  * insert it into the cache, and return it.
431  */
432 
433 /*
434  * If the driver returns 0 from the create method, the data member of
435  * the cso is set to the template itself.
436  */
437 
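/*
 * For example (a sketch; the blend values are purely illustrative):
 *
 *    struct pipe_blend_state blend = {0};
 *    blend.rt[0].blend_enable = 1;
 *    blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
 *    blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
 *    blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
 *    blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
 *    blend.rt[0].colormask = PIPE_MASK_RGBA;
 *    cso_set_blend(cso, &blend);
 *
 * The first call with a given template creates and caches the driver CSO;
 * later calls with an identical template reuse the cached handle and only
 * re-bind it when it differs from the currently bound one.
 */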
438 enum pipe_error cso_set_blend(struct cso_context *ctx,
439                               const struct pipe_blend_state *templ)
440 {
441    unsigned key_size, hash_key;
442    struct cso_hash_iter iter;
443    void *handle;
444 
445    key_size = templ->independent_blend_enable ?
446       sizeof(struct pipe_blend_state) :
447       (char *)&(templ->rt[1]) - (char *)templ;
448    hash_key = cso_construct_key((void*)templ, key_size);
449    iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
450                                   (void*)templ, key_size);
451 
452    if (cso_hash_iter_is_null(iter)) {
453       struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
454       if (!cso)
455          return PIPE_ERROR_OUT_OF_MEMORY;
456 
457       memset(&cso->state, 0, sizeof cso->state);
458       memcpy(&cso->state, templ, key_size);
459       cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
460 
461       iter = cso_insert_state(&ctx->cache, hash_key, CSO_BLEND, cso);
462       if (cso_hash_iter_is_null(iter)) {
463          FREE(cso);
464          return PIPE_ERROR_OUT_OF_MEMORY;
465       }
466 
467       handle = cso->data;
468    }
469    else {
470       handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
471    }
472 
473    if (ctx->blend != handle) {
474       ctx->blend = handle;
475       ctx->pipe->bind_blend_state(ctx->pipe, handle);
476    }
477    return PIPE_OK;
478 }
479 
480 static void
481 cso_save_blend(struct cso_context *ctx)
482 {
483    assert(!ctx->blend_saved);
484    ctx->blend_saved = ctx->blend;
485 }
486 
487 static void
488 cso_restore_blend(struct cso_context *ctx)
489 {
490    if (ctx->blend != ctx->blend_saved) {
491       ctx->blend = ctx->blend_saved;
492       ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
493    }
494    ctx->blend_saved = NULL;
495 }
496 
497 
498 
499 enum pipe_error
500 cso_set_depth_stencil_alpha(struct cso_context *ctx,
501                             const struct pipe_depth_stencil_alpha_state *templ)
502 {
503    unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
504    unsigned hash_key = cso_construct_key((void*)templ, key_size);
505    struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
506                                                        hash_key,
507                                                        CSO_DEPTH_STENCIL_ALPHA,
508                                                        (void*)templ, key_size);
509    void *handle;
510 
511    if (cso_hash_iter_is_null(iter)) {
512       struct cso_depth_stencil_alpha *cso =
513          MALLOC(sizeof(struct cso_depth_stencil_alpha));
514       if (!cso)
515          return PIPE_ERROR_OUT_OF_MEMORY;
516 
517       memcpy(&cso->state, templ, sizeof(*templ));
518       cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe,
519                                                               &cso->state);
520 
521       iter = cso_insert_state(&ctx->cache, hash_key,
522                               CSO_DEPTH_STENCIL_ALPHA, cso);
523       if (cso_hash_iter_is_null(iter)) {
524          FREE(cso);
525          return PIPE_ERROR_OUT_OF_MEMORY;
526       }
527 
528       handle = cso->data;
529    }
530    else {
531       handle = ((struct cso_depth_stencil_alpha *)
532                 cso_hash_iter_data(iter))->data;
533    }
534 
535    if (ctx->depth_stencil != handle) {
536       ctx->depth_stencil = handle;
537       ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
538    }
539    return PIPE_OK;
540 }
541 
542 static void
543 cso_save_depth_stencil_alpha(struct cso_context *ctx)
544 {
545    assert(!ctx->depth_stencil_saved);
546    ctx->depth_stencil_saved = ctx->depth_stencil;
547 }
548 
549 static void
550 cso_restore_depth_stencil_alpha(struct cso_context *ctx)
551 {
552    if (ctx->depth_stencil != ctx->depth_stencil_saved) {
553       ctx->depth_stencil = ctx->depth_stencil_saved;
554       ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe,
555                                                 ctx->depth_stencil_saved);
556    }
557    ctx->depth_stencil_saved = NULL;
558 }
559 
560 
561 
562 enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
563                                    const struct pipe_rasterizer_state *templ)
564 {
565    unsigned key_size = sizeof(struct pipe_rasterizer_state);
566    unsigned hash_key = cso_construct_key((void*)templ, key_size);
567    struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
568                                                        hash_key,
569                                                        CSO_RASTERIZER,
570                                                        (void*)templ, key_size);
571    void *handle = NULL;
572 
573    /* We can't have both point_quad_rasterization (sprites) and point_smooth
574     * (round AA points) enabled at the same time.
575     */
576    assert(!(templ->point_quad_rasterization && templ->point_smooth));
577 
578    if (cso_hash_iter_is_null(iter)) {
579       struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
580       if (!cso)
581          return PIPE_ERROR_OUT_OF_MEMORY;
582 
583       memcpy(&cso->state, templ, sizeof(*templ));
584       cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
585 
586       iter = cso_insert_state(&ctx->cache, hash_key, CSO_RASTERIZER, cso);
587       if (cso_hash_iter_is_null(iter)) {
588          FREE(cso);
589          return PIPE_ERROR_OUT_OF_MEMORY;
590       }
591 
592       handle = cso->data;
593    }
594    else {
595       handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
596    }
597 
598    if (ctx->rasterizer != handle) {
599       ctx->rasterizer = handle;
600       ctx->flatshade_first = templ->flatshade_first;
601       if (ctx->vbuf)
602          u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
603       ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
604    }
605    return PIPE_OK;
606 }
607 
608 static void
609 cso_save_rasterizer(struct cso_context *ctx)
610 {
611    assert(!ctx->rasterizer_saved);
612    ctx->rasterizer_saved = ctx->rasterizer;
613    ctx->flatshade_first_saved = ctx->flatshade_first;
614 }
615 
616 static void
617 cso_restore_rasterizer(struct cso_context *ctx)
618 {
619    if (ctx->rasterizer != ctx->rasterizer_saved) {
620       ctx->rasterizer = ctx->rasterizer_saved;
621       ctx->flatshade_first = ctx->flatshade_first_saved;
622       if (ctx->vbuf)
623          u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
624       ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
625    }
626    ctx->rasterizer_saved = NULL;
627 }
628 
629 
630 void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle )
631 {
632    if (ctx->fragment_shader != handle) {
633       ctx->fragment_shader = handle;
634       ctx->pipe->bind_fs_state(ctx->pipe, handle);
635    }
636 }
637 
638 static void
639 cso_save_fragment_shader(struct cso_context *ctx)
640 {
641    assert(!ctx->fragment_shader_saved);
642    ctx->fragment_shader_saved = ctx->fragment_shader;
643 }
644 
645 static void
646 cso_restore_fragment_shader(struct cso_context *ctx)
647 {
648    if (ctx->fragment_shader_saved != ctx->fragment_shader) {
649       ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
650       ctx->fragment_shader = ctx->fragment_shader_saved;
651    }
652    ctx->fragment_shader_saved = NULL;
653 }
654 
655 
656 void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle)
657 {
658    if (ctx->vertex_shader != handle) {
659       ctx->vertex_shader = handle;
660       ctx->pipe->bind_vs_state(ctx->pipe, handle);
661    }
662 }
663 
664 static void
665 cso_save_vertex_shader(struct cso_context *ctx)
666 {
667    assert(!ctx->vertex_shader_saved);
668    ctx->vertex_shader_saved = ctx->vertex_shader;
669 }
670 
671 static void
672 cso_restore_vertex_shader(struct cso_context *ctx)
673 {
674    if (ctx->vertex_shader_saved != ctx->vertex_shader) {
675       ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
676       ctx->vertex_shader = ctx->vertex_shader_saved;
677    }
678    ctx->vertex_shader_saved = NULL;
679 }
680 
681 
682 void cso_set_framebuffer(struct cso_context *ctx,
683                          const struct pipe_framebuffer_state *fb)
684 {
685    if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
686       util_copy_framebuffer_state(&ctx->fb, fb);
687       ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
688    }
689 }
690 
691 static void
692 cso_save_framebuffer(struct cso_context *ctx)
693 {
694    util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
695 }
696 
697 static void
698 cso_restore_framebuffer(struct cso_context *ctx)
699 {
700    if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
701       util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
702       ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
703       util_unreference_framebuffer_state(&ctx->fb_saved);
704    }
705 }
706 
707 
708 void cso_set_viewport(struct cso_context *ctx,
709                       const struct pipe_viewport_state *vp)
710 {
711    if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
712       ctx->vp = *vp;
713       ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp);
714    }
715 }
716 
717 /**
718  * Setup viewport state for given width and height (position is always (0,0)).
719  * Invert the Y axis if 'invert' is true.
720  */
721 void
722 cso_set_viewport_dims(struct cso_context *ctx,
723                       float width, float height, boolean invert)
724 {
725    struct pipe_viewport_state vp;
726    vp.scale[0] = width * 0.5f;
727    vp.scale[1] = height * (invert ? -0.5f : 0.5f);
728    vp.scale[2] = 0.5f;
729    vp.translate[0] = 0.5f * width;
730    vp.translate[1] = 0.5f * height;
731    vp.translate[2] = 0.5f;
732    vp.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
733    vp.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
734    vp.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
735    vp.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
736    cso_set_viewport(ctx, &vp);
737 }
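
/*
 * For instance, cso_set_viewport_dims(cso, w, h, FALSE) maps NDC x,y in
 * [-1,1] onto [0,w] x [0,h]; passing TRUE for 'invert' negates scale[1],
 * flipping the Y mapping so NDC y = -1 maps to h instead of 0.
 */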
738 
739 static void
740 cso_save_viewport(struct cso_context *ctx)
741 {
742    ctx->vp_saved = ctx->vp;
743 }
744 
745 
746 static void
747 cso_restore_viewport(struct cso_context *ctx)
748 {
749    if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
750       ctx->vp = ctx->vp_saved;
751       ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp);
752    }
753 }
754 
755 void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask)
756 {
757    if (ctx->sample_mask != sample_mask) {
758       ctx->sample_mask = sample_mask;
759       ctx->pipe->set_sample_mask(ctx->pipe, sample_mask);
760    }
761 }
762 
763 static void
764 cso_save_sample_mask(struct cso_context *ctx)
765 {
766    ctx->sample_mask_saved = ctx->sample_mask;
767 }
768 
769 static void
770 cso_restore_sample_mask(struct cso_context *ctx)
771 {
772    cso_set_sample_mask(ctx, ctx->sample_mask_saved);
773 }
774 
775 void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples)
776 {
777    if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) {
778       ctx->min_samples = min_samples;
779       ctx->pipe->set_min_samples(ctx->pipe, min_samples);
780    }
781 }
782 
783 static void
784 cso_save_min_samples(struct cso_context *ctx)
785 {
786    ctx->min_samples_saved = ctx->min_samples;
787 }
788 
789 static void
790 cso_restore_min_samples(struct cso_context *ctx)
791 {
792    cso_set_min_samples(ctx, ctx->min_samples_saved);
793 }
794 
795 void cso_set_stencil_ref(struct cso_context *ctx,
796                          const struct pipe_stencil_ref sr)
797 {
798    if (memcmp(&ctx->stencil_ref, &sr, sizeof(ctx->stencil_ref))) {
799       ctx->stencil_ref = sr;
800       ctx->pipe->set_stencil_ref(ctx->pipe, sr);
801    }
802 }
803 
804 static void
805 cso_save_stencil_ref(struct cso_context *ctx)
806 {
807    ctx->stencil_ref_saved = ctx->stencil_ref;
808 }
809 
810 
811 static void
812 cso_restore_stencil_ref(struct cso_context *ctx)
813 {
814    if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
815               sizeof(ctx->stencil_ref))) {
816       ctx->stencil_ref = ctx->stencil_ref_saved;
817       ctx->pipe->set_stencil_ref(ctx->pipe, ctx->stencil_ref);
818    }
819 }
820 
821 void cso_set_render_condition(struct cso_context *ctx,
822                               struct pipe_query *query,
823                               boolean condition,
824                               enum pipe_render_cond_flag mode)
825 {
826    struct pipe_context *pipe = ctx->pipe;
827 
828    if (ctx->render_condition != query ||
829        ctx->render_condition_mode != mode ||
830        ctx->render_condition_cond != condition) {
831       pipe->render_condition(pipe, query, condition, mode);
832       ctx->render_condition = query;
833       ctx->render_condition_cond = condition;
834       ctx->render_condition_mode = mode;
835    }
836 }
837 
838 static void
839 cso_save_render_condition(struct cso_context *ctx)
840 {
841    ctx->render_condition_saved = ctx->render_condition;
842    ctx->render_condition_cond_saved = ctx->render_condition_cond;
843    ctx->render_condition_mode_saved = ctx->render_condition_mode;
844 }
845 
846 static void
847 cso_restore_render_condition(struct cso_context *ctx)
848 {
849    cso_set_render_condition(ctx, ctx->render_condition_saved,
850                             ctx->render_condition_cond_saved,
851                             ctx->render_condition_mode_saved);
852 }
853 
854 void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle)
855 {
856    assert(ctx->has_geometry_shader || !handle);
857 
858    if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
859       ctx->geometry_shader = handle;
860       ctx->pipe->bind_gs_state(ctx->pipe, handle);
861    }
862 }
863 
864 static void
865 cso_save_geometry_shader(struct cso_context *ctx)
866 {
867    if (!ctx->has_geometry_shader) {
868       return;
869    }
870 
871    assert(!ctx->geometry_shader_saved);
872    ctx->geometry_shader_saved = ctx->geometry_shader;
873 }
874 
875 static void
876 cso_restore_geometry_shader(struct cso_context *ctx)
877 {
878    if (!ctx->has_geometry_shader) {
879       return;
880    }
881 
882    if (ctx->geometry_shader_saved != ctx->geometry_shader) {
883       ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved);
884       ctx->geometry_shader = ctx->geometry_shader_saved;
885    }
886    ctx->geometry_shader_saved = NULL;
887 }
888 
889 void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle)
890 {
891    assert(ctx->has_tessellation || !handle);
892 
893    if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
894       ctx->tessctrl_shader = handle;
895       ctx->pipe->bind_tcs_state(ctx->pipe, handle);
896    }
897 }
898 
899 static void
900 cso_save_tessctrl_shader(struct cso_context *ctx)
901 {
902    if (!ctx->has_tessellation) {
903       return;
904    }
905 
906    assert(!ctx->tessctrl_shader_saved);
907    ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
908 }
909 
910 static void
911 cso_restore_tessctrl_shader(struct cso_context *ctx)
912 {
913    if (!ctx->has_tessellation) {
914       return;
915    }
916 
917    if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
918       ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved);
919       ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
920    }
921    ctx->tessctrl_shader_saved = NULL;
922 }
923 
924 void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle)
925 {
926    assert(ctx->has_tessellation || !handle);
927 
928    if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
929       ctx->tesseval_shader = handle;
930       ctx->pipe->bind_tes_state(ctx->pipe, handle);
931    }
932 }
933 
934 static void
935 cso_save_tesseval_shader(struct cso_context *ctx)
936 {
937    if (!ctx->has_tessellation) {
938       return;
939    }
940 
941    assert(!ctx->tesseval_shader_saved);
942    ctx->tesseval_shader_saved = ctx->tesseval_shader;
943 }
944 
945 static void
946 cso_restore_tesseval_shader(struct cso_context *ctx)
947 {
948    if (!ctx->has_tessellation) {
949       return;
950    }
951 
952    if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
953       ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved);
954       ctx->tesseval_shader = ctx->tesseval_shader_saved;
955    }
956    ctx->tesseval_shader_saved = NULL;
957 }
958 
959 void cso_set_compute_shader_handle(struct cso_context *ctx, void *handle)
960 {
961    assert(ctx->has_compute_shader || !handle);
962 
963    if (ctx->has_compute_shader && ctx->compute_shader != handle) {
964       ctx->compute_shader = handle;
965       ctx->pipe->bind_compute_state(ctx->pipe, handle);
966    }
967 }
968 
969 static void
970 cso_save_compute_shader(struct cso_context *ctx)
971 {
972    if (!ctx->has_compute_shader) {
973       return;
974    }
975 
976    assert(!ctx->compute_shader_saved);
977    ctx->compute_shader_saved = ctx->compute_shader;
978 }
979 
980 static void
981 cso_restore_compute_shader(struct cso_context *ctx)
982 {
983    if (!ctx->has_compute_shader) {
984       return;
985    }
986 
987    if (ctx->compute_shader_saved != ctx->compute_shader) {
988       ctx->pipe->bind_compute_state(ctx->pipe, ctx->compute_shader_saved);
989       ctx->compute_shader = ctx->compute_shader_saved;
990    }
991    ctx->compute_shader_saved = NULL;
992 }
993 
994 
995 static void
996 cso_save_compute_samplers(struct cso_context *ctx)
997 {
998    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
999    struct sampler_info *saved = &ctx->compute_samplers_saved;
1000 
1001    memcpy(saved->cso_samplers, info->cso_samplers,
1002           sizeof(info->cso_samplers));
1003    memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1004 }
1005 
1006 
1007 static void
1008 cso_restore_compute_samplers(struct cso_context *ctx)
1009 {
1010    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
1011    struct sampler_info *saved = &ctx->compute_samplers_saved;
1012 
1013    memcpy(info->cso_samplers, saved->cso_samplers,
1014           sizeof(info->cso_samplers));
1015    memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1016 
1017    for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1018       if (info->samplers[i]) {
1019          ctx->max_sampler_seen = i;
1020          break;
1021       }
1022    }
1023 
1024    cso_single_sampler_done(ctx, PIPE_SHADER_COMPUTE);
1025 }
1026 
1027 
1028 static void
1029 cso_set_vertex_elements_direct(struct cso_context *ctx,
1030                                const struct cso_velems_state *velems)
1031 {
1032    unsigned key_size, hash_key;
1033    struct cso_hash_iter iter;
1034    void *handle;
1035 
1036    /* The count must be included in the stored state data too. Otherwise
1037     * the first few pipe_vertex_elements could be identical even if the
1038     * counts differ, and there's no guarantee the hash would differ in
1039     * that case either.
1040     */
1041    key_size = sizeof(struct pipe_vertex_element) * velems->count +
1042               sizeof(unsigned);
1043    hash_key = cso_construct_key((void*)velems, key_size);
1044    iter = cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
1045                                   (void*)velems, key_size);
1046 
1047    if (cso_hash_iter_is_null(iter)) {
1048       struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
1049       if (!cso)
1050          return;
1051 
1052       memcpy(&cso->state, velems, key_size);
1053 
1054       /* Lower 64-bit vertex attributes. */
1055       unsigned new_count = velems->count;
1056       const struct pipe_vertex_element *new_elems = velems->velems;
1057       struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS];
1058       util_lower_uint64_vertex_elements(&new_elems, &new_count, tmp);
1059 
1060       cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, new_count,
1061                                                           new_elems);
1062 
1063       iter = cso_insert_state(&ctx->cache, hash_key, CSO_VELEMENTS, cso);
1064       if (cso_hash_iter_is_null(iter)) {
1065          FREE(cso);
1066          return;
1067       }
1068 
1069       handle = cso->data;
1070    }
1071    else {
1072       handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
1073    }
1074 
1075    if (ctx->velements != handle) {
1076       ctx->velements = handle;
1077       ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
1078    }
1079 }
1080 
1081 enum pipe_error
1082 cso_set_vertex_elements(struct cso_context *ctx,
1083                         const struct cso_velems_state *velems)
1084 {
1085    struct u_vbuf *vbuf = ctx->vbuf_current;
1086 
1087    if (vbuf) {
1088       u_vbuf_set_vertex_elements(vbuf, velems);
1089       return PIPE_OK;
1090    }
1091 
1092    cso_set_vertex_elements_direct(ctx, velems);
1093    return PIPE_OK;
1094 }
1095 
1096 static void
1097 cso_save_vertex_elements(struct cso_context *ctx)
1098 {
1099    struct u_vbuf *vbuf = ctx->vbuf_current;
1100 
1101    if (vbuf) {
1102       u_vbuf_save_vertex_elements(vbuf);
1103       return;
1104    }
1105 
1106    assert(!ctx->velements_saved);
1107    ctx->velements_saved = ctx->velements;
1108 }
1109 
1110 static void
1111 cso_restore_vertex_elements(struct cso_context *ctx)
1112 {
1113    struct u_vbuf *vbuf = ctx->vbuf_current;
1114 
1115    if (vbuf) {
1116       u_vbuf_restore_vertex_elements(vbuf);
1117       return;
1118    }
1119 
1120    if (ctx->velements != ctx->velements_saved) {
1121       ctx->velements = ctx->velements_saved;
1122       ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved);
1123    }
1124    ctx->velements_saved = NULL;
1125 }
1126 
1127 /* vertex buffers */
1128 
1129 void cso_set_vertex_buffers(struct cso_context *ctx,
1130                             unsigned start_slot, unsigned count,
1131                             unsigned unbind_trailing_count,
1132                             bool take_ownership,
1133                             const struct pipe_vertex_buffer *buffers)
1134 {
1135    struct u_vbuf *vbuf = ctx->vbuf_current;
1136 
1137    if (!count && !unbind_trailing_count)
1138       return;
1139 
1140    if (vbuf) {
1141       u_vbuf_set_vertex_buffers(vbuf, start_slot, count, unbind_trailing_count,
1142                                 take_ownership, buffers);
1143       return;
1144    }
1145 
1146    struct pipe_context *pipe = ctx->pipe;
1147    pipe->set_vertex_buffers(pipe, start_slot, count, unbind_trailing_count,
1148                             take_ownership, buffers);
1149 }
1150 
1151 /**
1152  * Set vertex buffers and vertex elements. If u_vbuf is only needed for
1153  * user vertex buffers and this call doesn't set any, u_vbuf is skipped
1154  * and disabled. To re-enable u_vbuf, call this function again.
1155  *
1156  * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1157  * such as VBOs, glBegin/End, and display lists.
1158  *
1159  * Internal operations that do "save states, draw, restore states" shouldn't
1160  * use this, because the states are only saved in either cso_context or
1161  * u_vbuf, not both.
1162  */
1163 void
1164 cso_set_vertex_buffers_and_elements(struct cso_context *ctx,
1165                                     const struct cso_velems_state *velems,
1166                                     unsigned vb_count,
1167                                     unsigned unbind_trailing_vb_count,
1168                                     bool take_ownership,
1169                                     bool uses_user_vertex_buffers,
1170                                     const struct pipe_vertex_buffer *vbuffers)
1171 {
1172    struct u_vbuf *vbuf = ctx->vbuf;
1173    struct pipe_context *pipe = ctx->pipe;
1174 
1175    if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
1176       if (!ctx->vbuf_current) {
1177          /* Unbind all buffers in cso_context, because we'll use u_vbuf. */
1178          unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
1179          if (unbind_vb_count)
1180             pipe->set_vertex_buffers(pipe, 0, 0, unbind_vb_count, false, NULL);
1181 
1182          /* Unset this to make sure the CSO is re-bound on the next use. */
1183          ctx->velements = NULL;
1184          ctx->vbuf_current = vbuf;
1185          unbind_trailing_vb_count = 0;
1186       }
1187 
1188       if (vb_count || unbind_trailing_vb_count) {
1189          u_vbuf_set_vertex_buffers(vbuf, 0, vb_count,
1190                                    unbind_trailing_vb_count,
1191                                    take_ownership, vbuffers);
1192       }
1193       u_vbuf_set_vertex_elements(vbuf, velems);
1194       return;
1195    }
1196 
1197    if (ctx->vbuf_current) {
1198       /* Unbind all buffers in u_vbuf, because we'll use cso_context. */
1199       unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
1200       if (unbind_vb_count)
1201          u_vbuf_set_vertex_buffers(vbuf, 0, 0, unbind_vb_count, false, NULL);
1202 
1203       /* Unset this to make sure the CSO is re-bound on the next use. */
1204       u_vbuf_unset_vertex_elements(vbuf);
1205       ctx->vbuf_current = NULL;
1206       unbind_trailing_vb_count = 0;
1207    }
1208 
1209    if (vb_count || unbind_trailing_vb_count) {
1210       pipe->set_vertex_buffers(pipe, 0, vb_count, unbind_trailing_vb_count,
1211                                take_ownership, vbuffers);
1212    }
1213    cso_set_vertex_elements_direct(ctx, velems);
1214 }
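
/*
 * A sketched caller pattern (names are illustrative, not real helpers):
 *
 *    cso_set_vertex_buffers_and_elements(cso, &velems_state,
 *                                        nr_vbuffers,
 *                                        0,     // unbind_trailing_vb_count
 *                                        false, // take_ownership
 *                                        any_user_buffers, vbuffers);
 *
 * Passing both the buffers and the elements in one call lets the
 * u_vbuf-versus-direct decision above be made atomically for the draw.
 */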
1215 
1216 ALWAYS_INLINE static struct cso_sampler *
1217 set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1218             unsigned idx, const struct pipe_sampler_state *templ, size_t key_size)
1219 {
1220    unsigned hash_key = cso_construct_key((void*)templ, key_size);
1221    struct cso_sampler *cso;
1222    struct cso_hash_iter iter =
1223       cso_find_state_template(&ctx->cache,
1224                               hash_key, CSO_SAMPLER,
1225                               (void *) templ, key_size);
1226 
1227    if (cso_hash_iter_is_null(iter)) {
1228       cso = MALLOC(sizeof(struct cso_sampler));
1229       if (!cso)
1230          return NULL;
1231 
1232       memcpy(&cso->state, templ, sizeof(*templ));
1233       cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
1234       cso->hash_key = hash_key;
1235 
1236       iter = cso_insert_state(&ctx->cache, hash_key, CSO_SAMPLER, cso);
1237       if (cso_hash_iter_is_null(iter)) {
1238          FREE(cso);
1239          return NULL;
1240       }
1241    } else {
1242       cso = cso_hash_iter_data(iter);
1243    }
1244    return cso;
1245 }
1246 
1247 ALWAYS_INLINE  static bool
1248 cso_set_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1249                 unsigned idx, const struct pipe_sampler_state *templ, size_t size)
1250 {
1251    struct cso_sampler *cso = set_sampler(ctx, shader_stage, idx, templ, size);
   if (!cso)
      return false;
1252    ctx->samplers[shader_stage].cso_samplers[idx] = cso;
1253    ctx->samplers[shader_stage].samplers[idx] = cso->data;
1254    return true;
1255 }
1256 
1257 void
1258 cso_single_sampler(struct cso_context *ctx, enum pipe_shader_type shader_stage,
1259                    unsigned idx, const struct pipe_sampler_state *templ)
1260 {
1261    size_t size = ctx->sampler_format ? sizeof(struct pipe_sampler_state) :
1262                                        offsetof(struct pipe_sampler_state, border_color_format);
1263    if (cso_set_sampler(ctx, shader_stage, idx, templ, size))
1264       ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1265 }
1266 
1267 /**
1268  * Send staged sampler state to the driver.
1269  */
1270 void
1271 cso_single_sampler_done(struct cso_context *ctx,
1272                         enum pipe_shader_type shader_stage)
1273 {
1274    struct sampler_info *info = &ctx->samplers[shader_stage];
1275 
1276    if (ctx->max_sampler_seen == -1)
1277       return;
1278 
1279    ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
1280                                   ctx->max_sampler_seen + 1,
1281                                   info->samplers);
1282    ctx->max_sampler_seen = -1;
1283 }
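
/*
 * A sketched sequence (num_units and templates stand in for the caller's
 * own data): stage each unit, then commit the whole range once.
 *
 *    for (unsigned i = 0; i < num_units; i++)
 *       cso_single_sampler(cso, PIPE_SHADER_FRAGMENT, i, &templates[i]);
 *    cso_single_sampler_done(cso, PIPE_SHADER_FRAGMENT);
 *
 * Only slots 0..max_sampler_seen are sent to pipe->bind_sampler_states().
 */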
1284 
1285 ALWAYS_INLINE static int
1286 set_samplers(struct cso_context *ctx,
1287              enum pipe_shader_type shader_stage,
1288              unsigned nr,
1289              const struct pipe_sampler_state **templates,
1290              size_t key_size)
1291 {
1292    int last = -1;
1293    for (unsigned i = 0; i < nr; i++) {
1294       if (!templates[i])
1295          continue;
1296 
1297       /* Reuse the same sampler state CSO if 2 consecutive sampler states
1298        * are identical.
1299        *
1300        * The trivial case where both pointers are equal doesn't occur in
1301        * frequented codepaths.
1302        *
1303        * Reuse rate:
1304        * - Borderlands 2: 55%
1305        * - Hitman: 65%
1306        * - Rocket League: 75%
1307        * - Tomb Raider: 50-65%
1308        * - XCOM 2: 55%
1309        */
1310       if (last >= 0 &&
1311           !memcmp(templates[i], templates[last],
1312                   key_size)) {
1313          ctx->samplers[shader_stage].cso_samplers[i] =
1314             ctx->samplers[shader_stage].cso_samplers[last];
1315          ctx->samplers[shader_stage].samplers[i] =
1316             ctx->samplers[shader_stage].samplers[last];
1317       } else {
1318          /* Look up the sampler state CSO. */
1319          cso_set_sampler(ctx, shader_stage, i, templates[i], key_size);
1320       }
1321 
1322       last = i;
1323    }
1324    return last;
1325 }
1326 
1327 /*
1328  * If the function encounters any errors, it keeps
1329  * going and tries to set as many samplers
1330  * as possible.
1331  */
1332 void
1333 cso_set_samplers(struct cso_context *ctx,
1334                  enum pipe_shader_type shader_stage,
1335                  unsigned nr,
1336                  const struct pipe_sampler_state **templates)
1337 {
1338    int last = -1;
1339 
1340    /* ensure sampler size is a constant for memcmp */
1341    size_t size = ctx->sampler_format ? sizeof(struct pipe_sampler_state) :
1342                                        offsetof(struct pipe_sampler_state, border_color_format);
1343    last = set_samplers(ctx, shader_stage, nr, templates, size);
1344 
1345    ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, last);
1346    cso_single_sampler_done(ctx, shader_stage);
1347 }
1348 
1349 static void
1350 cso_save_fragment_samplers(struct cso_context *ctx)
1351 {
1352    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1353    struct sampler_info *saved = &ctx->fragment_samplers_saved;
1354 
1355    memcpy(saved->cso_samplers, info->cso_samplers,
1356           sizeof(info->cso_samplers));
1357    memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1358 }
1359 
1360 
1361 static void
1362 cso_restore_fragment_samplers(struct cso_context *ctx)
1363 {
1364    struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1365    struct sampler_info *saved = &ctx->fragment_samplers_saved;
1366 
1367    memcpy(info->cso_samplers, saved->cso_samplers,
1368           sizeof(info->cso_samplers));
1369    memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1370 
1371    for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1372       if (info->samplers[i]) {
1373          ctx->max_sampler_seen = i;
1374          break;
1375       }
1376    }
1377 
1378    cso_single_sampler_done(ctx, PIPE_SHADER_FRAGMENT);
1379 }
1380 
1381 
1382 void
1383 cso_set_stream_outputs(struct cso_context *ctx,
1384                        unsigned num_targets,
1385                        struct pipe_stream_output_target **targets,
1386                        const unsigned *offsets)
1387 {
1388    struct pipe_context *pipe = ctx->pipe;
1389    uint i;
1390 
1391    if (!ctx->has_streamout) {
1392       assert(num_targets == 0);
1393       return;
1394    }
1395 
1396    if (ctx->nr_so_targets == 0 && num_targets == 0) {
1397       /* Nothing to do. */
1398       return;
1399    }
1400 
1401    /* reference new targets */
1402    for (i = 0; i < num_targets; i++) {
1403       pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1404    }
1405    /* unref extra old targets, if any */
1406    for (; i < ctx->nr_so_targets; i++) {
1407       pipe_so_target_reference(&ctx->so_targets[i], NULL);
1408    }
1409 
1410    pipe->set_stream_output_targets(pipe, num_targets, targets,
1411                                    offsets);
1412    ctx->nr_so_targets = num_targets;
1413 }
1414 
1415 static void
1416 cso_save_stream_outputs(struct cso_context *ctx)
1417 {
1418    uint i;
1419 
1420    if (!ctx->has_streamout) {
1421       return;
1422    }
1423 
1424    ctx->nr_so_targets_saved = ctx->nr_so_targets;
1425 
1426    for (i = 0; i < ctx->nr_so_targets; i++) {
1427       assert(!ctx->so_targets_saved[i]);
1428       pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1429    }
1430 }
1431 
1432 static void
1433 cso_restore_stream_outputs(struct cso_context *ctx)
1434 {
1435    struct pipe_context *pipe = ctx->pipe;
1436    uint i;
1437    unsigned offset[PIPE_MAX_SO_BUFFERS];
1438 
1439    if (!ctx->has_streamout) {
1440       return;
1441    }
1442 
1443    if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
1444       /* Nothing to do. */
1445       return;
1446    }
1447 
1448    assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
1449    for (i = 0; i < ctx->nr_so_targets_saved; i++) {
1450       pipe_so_target_reference(&ctx->so_targets[i], NULL);
1451       /* move the reference from one pointer to another */
1452       ctx->so_targets[i] = ctx->so_targets_saved[i];
1453       ctx->so_targets_saved[i] = NULL;
1454       /* -1 means append */
1455       offset[i] = (unsigned)-1;
1456    }
1457    for (; i < ctx->nr_so_targets; i++) {
1458       pipe_so_target_reference(&ctx->so_targets[i], NULL);
1459    }
1460 
1461    pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
1462                                    ctx->so_targets, offset);
1463 
1464    ctx->nr_so_targets = ctx->nr_so_targets_saved;
1465    ctx->nr_so_targets_saved = 0;
1466 }
1467 
1468 
1469 /**
1470  * Save all the CSO state items specified by the state_mask bitmask
1471  * of CSO_BIT_x flags.
1472  */
1473 void
1474 cso_save_state(struct cso_context *cso, unsigned state_mask)
1475 {
1476    assert(cso->saved_state == 0);
1477 
1478    cso->saved_state = state_mask;
1479 
1480    if (state_mask & CSO_BIT_BLEND)
1481       cso_save_blend(cso);
1482    if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1483       cso_save_depth_stencil_alpha(cso);
1484    if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1485       cso_save_fragment_samplers(cso);
1486    if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1487       cso_save_fragment_shader(cso);
1488    if (state_mask & CSO_BIT_FRAMEBUFFER)
1489       cso_save_framebuffer(cso);
1490    if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1491       cso_save_geometry_shader(cso);
1492    if (state_mask & CSO_BIT_MIN_SAMPLES)
1493       cso_save_min_samples(cso);
1494    if (state_mask & CSO_BIT_RASTERIZER)
1495       cso_save_rasterizer(cso);
1496    if (state_mask & CSO_BIT_RENDER_CONDITION)
1497       cso_save_render_condition(cso);
1498    if (state_mask & CSO_BIT_SAMPLE_MASK)
1499       cso_save_sample_mask(cso);
1500    if (state_mask & CSO_BIT_STENCIL_REF)
1501       cso_save_stencil_ref(cso);
1502    if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1503       cso_save_stream_outputs(cso);
1504    if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1505       cso_save_tessctrl_shader(cso);
1506    if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1507       cso_save_tesseval_shader(cso);
1508    if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1509       cso_save_vertex_elements(cso);
1510    if (state_mask & CSO_BIT_VERTEX_SHADER)
1511       cso_save_vertex_shader(cso);
1512    if (state_mask & CSO_BIT_VIEWPORT)
1513       cso_save_viewport(cso);
1514    if (state_mask & CSO_BIT_PAUSE_QUERIES)
1515       cso->pipe->set_active_query_state(cso->pipe, false);
1516 }
1517 
1518 
1519 /**
1520  * Restore the state which was saved by cso_save_state().
1521  */
1522 void
1523 cso_restore_state(struct cso_context *cso, unsigned unbind)
1524 {
1525    unsigned state_mask = cso->saved_state;
1526 
1527    assert(state_mask);
1528 
1529    if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1530       cso_restore_depth_stencil_alpha(cso);
1531    if (state_mask & CSO_BIT_STENCIL_REF)
1532       cso_restore_stencil_ref(cso);
1533    if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1534       cso_restore_fragment_shader(cso);
1535    if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1536       cso_restore_geometry_shader(cso);
1537    if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1538       cso_restore_tesseval_shader(cso);
1539    if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1540       cso_restore_tessctrl_shader(cso);
1541    if (state_mask & CSO_BIT_VERTEX_SHADER)
1542       cso_restore_vertex_shader(cso);
1543    if (unbind & CSO_UNBIND_FS_SAMPLERVIEWS)
1544       cso->pipe->set_sampler_views(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0,
1545                                    cso->max_fs_samplerviews, false, NULL);
1546    if (unbind & CSO_UNBIND_FS_SAMPLERVIEW0)
1547       cso->pipe->set_sampler_views(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0,
1548                                    1, false, NULL);
1549    if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1550       cso_restore_fragment_samplers(cso);
1551    if (unbind & CSO_UNBIND_FS_IMAGE0)
1552       cso->pipe->set_shader_images(cso->pipe, PIPE_SHADER_FRAGMENT, 0, 0, 1, NULL);
1553    if (state_mask & CSO_BIT_FRAMEBUFFER)
1554       cso_restore_framebuffer(cso);
1555    if (state_mask & CSO_BIT_BLEND)
1556       cso_restore_blend(cso);
1557    if (state_mask & CSO_BIT_RASTERIZER)
1558       cso_restore_rasterizer(cso);
1559    if (state_mask & CSO_BIT_MIN_SAMPLES)
1560       cso_restore_min_samples(cso);
1561    if (state_mask & CSO_BIT_RENDER_CONDITION)
1562       cso_restore_render_condition(cso);
1563    if (state_mask & CSO_BIT_SAMPLE_MASK)
1564       cso_restore_sample_mask(cso);
1565    if (state_mask & CSO_BIT_VIEWPORT)
1566       cso_restore_viewport(cso);
1567    if (unbind & CSO_UNBIND_VS_CONSTANTS)
1568       cso->pipe->set_constant_buffer(cso->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
1569    if (unbind & CSO_UNBIND_FS_CONSTANTS)
1570       cso->pipe->set_constant_buffer(cso->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
1571    if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1572       cso_restore_vertex_elements(cso);
1573    if (unbind & CSO_UNBIND_VERTEX_BUFFER0)
1574       cso->pipe->set_vertex_buffers(cso->pipe, 0, 0, 1, false, NULL);
1575    if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1576       cso_restore_stream_outputs(cso);
1577    if (state_mask & CSO_BIT_PAUSE_QUERIES)
1578       cso->pipe->set_active_query_state(cso->pipe, true);
1579 
1580    cso->saved_state = 0;
1581 }
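
/* Example (sketch): a typical meta operation saves only the state it is
 * about to clobber, binds its own shaders and viewport, draws, and then
 * restores everything in one call.  The particular bit selection below is
 * illustrative, not prescriptive.
 *
 *    cso_save_state(cso, CSO_BIT_FRAGMENT_SHADER |
 *                        CSO_BIT_VERTEX_SHADER |
 *                        CSO_BIT_VIEWPORT);
 *    ...bind temporary shaders and viewport, then draw...
 *    cso_restore_state(cso, 0);
 */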
1582 
1583 /**
1584  * Save all the CSO state items specified by the state_mask bitmask
1585  * of CSO_BIT_COMPUTE_x flags.
1586  */
1587 void
1588 cso_save_compute_state(struct cso_context *cso, unsigned state_mask)
1589 {
1590    assert(cso->saved_compute_state == 0);
1591 
1592    cso->saved_compute_state = state_mask;
1593 
1594    if (state_mask & CSO_BIT_COMPUTE_SHADER)
1595       cso_save_compute_shader(cso);
1596 
1597    if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1598       cso_save_compute_samplers(cso);
1599 }
1600 
1601 
1602 /**
1603  * Restore the state which was saved by cso_save_compute_state().
1604  */
1605 void
1606 cso_restore_compute_state(struct cso_context *cso)
1607 {
1608    unsigned state_mask = cso->saved_compute_state;
1609 
1610    assert(state_mask);
1611 
1612    if (state_mask & CSO_BIT_COMPUTE_SHADER)
1613       cso_restore_compute_shader(cso);
1614 
1615    if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1616       cso_restore_compute_samplers(cso);
1617 
1618    cso->saved_compute_state = 0;
1619 }
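
/* Example (sketch): the compute variant mirrors cso_save_state() /
 * cso_restore_state() but only covers the compute shader and its samplers.
 *
 *    cso_save_compute_state(cso, CSO_BIT_COMPUTE_SHADER |
 *                                CSO_BIT_COMPUTE_SAMPLERS);
 *    ...bind a helper compute shader and launch it...
 *    cso_restore_compute_state(cso);
 */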
1620 
1621 
1622 
1623 /* drawing */
1624 
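/**
 * Draw with the current CSO state.  The call goes through u_vbuf when
 * vertex-buffer translation is in use for the bound vertex elements,
 * otherwise it goes straight to the driver's draw_vbo.
 */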
1625 void
1626 cso_draw_vbo(struct cso_context *cso,
1627              const struct pipe_draw_info *info,
1628              unsigned drawid_offset,
1629              const struct pipe_draw_indirect_info *indirect,
1630              const struct pipe_draw_start_count_bias draw)
1631 {
1632    struct u_vbuf *vbuf = cso->vbuf_current;
1633 
1634    /* We can't have both indirect drawing and SO-vertex-count drawing */
1635    assert(!indirect ||
1636           indirect->buffer == NULL ||
1637           indirect->count_from_stream_output == NULL);
1638 
1639    /* We can't have SO-vertex-count drawing with an index buffer */
1640    assert(info->index_size == 0 ||
1641           !indirect ||
1642           indirect->count_from_stream_output == NULL);
1643 
1644    if (vbuf) {
1645       u_vbuf_draw_vbo(vbuf, info, drawid_offset, indirect, &draw, 1);
1646    } else {
1647       struct pipe_context *pipe = cso->pipe;
1648       pipe->draw_vbo(pipe, info, drawid_offset, indirect, &draw, 1);
1649    }
1650 }
1651 
1652 /* info->draw_id can be changed by the callee if increment_draw_id is true. */
1653 void
1654 cso_multi_draw(struct cso_context *cso,
1655                struct pipe_draw_info *info,
1656                unsigned drawid_offset,
1657                const struct pipe_draw_start_count_bias *draws,
1658                unsigned num_draws)
1659 {
1660    struct u_vbuf *vbuf = cso->vbuf_current;
1661 
1662    if (vbuf) {
1663       u_vbuf_draw_vbo(vbuf, info, drawid_offset, NULL, draws, num_draws);
1664    } else {
1665       struct pipe_context *pipe = cso->pipe;
1666 
1667       pipe->draw_vbo(pipe, info, drawid_offset, NULL, draws, num_draws);
1668    }
1669 }
1670 
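/**
 * Convenience wrapper for drawing a non-indexed vertex range: fills in a
 * pipe_draw_info with valid index bounds and forwards it to cso_draw_vbo().
 */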
1671 void
1672 cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count)
1673 {
1674    struct pipe_draw_info info;
1675    struct pipe_draw_start_count_bias draw;
1676 
1677    util_draw_init_info(&info);
1678 
1679    info.mode = mode;
1680    info.index_bounds_valid = true;
1681    info.min_index = start;
1682    info.max_index = start + count - 1;
1683 
1684    draw.start = start;
1685    draw.count = count;
1686    draw.index_bias = 0;
1687 
1688    cso_draw_vbo(cso, &info, 0, NULL, draw);
1689 }
1690 
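/**
 * Instanced variant of cso_draw_arrays().
 */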
1691 void
1692 cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
1693                           uint start, uint count,
1694                           uint start_instance, uint instance_count)
1695 {
1696    struct pipe_draw_info info;
1697    struct pipe_draw_start_count_bias draw;
1698 
1699    util_draw_init_info(&info);
1700 
1701    info.mode = mode;
1702    info.index_bounds_valid = true;
1703    info.min_index = start;
1704    info.max_index = start + count - 1;
1705    info.start_instance = start_instance;
1706    info.instance_count = instance_count;
1707 
1708    draw.start = start;
1709    draw.count = count;
1710    draw.index_bias = 0;
1711 
1712    cso_draw_vbo(cso, &info, 0, NULL, draw);
1713 }
1714