1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 *
31 * Wrap the cso cache & hash mechanisms in a simplified
32 * pipe-driver-specific interface.
33 *
34 * @author Zack Rusin <zackr@vmware.com>
35 * @author Keith Whitwell <keithw@vmware.com>
36 */
37
38 #include "pipe/p_state.h"
39 #include "util/u_draw.h"
40 #include "util/u_framebuffer.h"
41 #include "util/u_helpers.h"
42 #include "util/u_inlines.h"
43 #include "util/u_math.h"
44 #include "util/u_memory.h"
45 #include "util/u_vbuf.h"
46
47 #include "cso_cache/cso_context.h"
48 #include "cso_cache/cso_cache.h"
49 #include "cso_cache/cso_hash.h"
50 #include "cso_context.h"
51 #include "driver_trace/tr_dump.h"
52 #include "util/u_threaded_context.h"
53
/**
 * Per-shader sampler information.
 */
struct sampler_info
{
   /* Cached CSO wrappers (hash-table entries) for each sampler slot. */
   struct cso_sampler *cso_samplers[PIPE_MAX_SAMPLERS];
   /* Driver sampler-state handles for each slot (the cso ->data objects). */
   void *samplers[PIPE_MAX_SAMPLERS];
};
62
63
64
struct cso_context_priv {
   /* Public part; base.pipe is the wrapped driver context. */
   struct cso_context base;

   /* Vertex-buffer translation module (may be NULL if not needed). */
   struct u_vbuf *vbuf;
   /* Either == vbuf (u_vbuf active) or NULL; mirrors base.pipe->vbuf. */
   struct u_vbuf *vbuf_current;
   bool always_use_vbuf;
   /* Set when the border-color swizzle quirk requires format-aware samplers. */
   bool sampler_format;

   /* Capabilities queried from the screen at creation time. */
   bool has_geometry_shader;
   bool has_tessellation;
   bool has_compute_shader;
   bool has_task_mesh_shader;
   bool has_streamout;

   uint32_t max_fs_samplerviews : 16;

   unsigned saved_state;         /**< bitmask of CSO_BIT_x flags */
   unsigned saved_compute_state; /**< bitmask of CSO_BIT_COMPUTE_x flags */

   /* 1-deep save stacks for fragment/compute sampler state. */
   struct sampler_info fragment_samplers_saved;
   struct sampler_info compute_samplers_saved;
   /* Currently bound samplers, per shader stage. */
   struct sampler_info samplers[PIPE_SHADER_MESH_TYPES];

   /* Temporary number until cso_single_sampler_done is called.
    * It tracks the highest sampler seen in cso_single_sampler.
    */
   int max_sampler_seen;

   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *compute_shader, *compute_shader_saved;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   enum pipe_render_cond_flag render_condition_mode, render_condition_mode_saved;
   bool render_condition_cond, render_condition_cond_saved;
   bool flatshade_first, flatshade_first_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;

   /* This should be last to keep all of the above together in memory.
    * (cso_unbind_context memsets from nr_so_targets up to this member.)
    */
   struct cso_cache cache;
};
126
127
128 static inline bool
delete_cso(struct cso_context_priv * ctx,void * state,enum cso_cache_type type)129 delete_cso(struct cso_context_priv *ctx,
130 void *state, enum cso_cache_type type)
131 {
132 switch (type) {
133 case CSO_BLEND:
134 if (ctx->blend == ((struct cso_blend*)state)->data ||
135 ctx->blend_saved == ((struct cso_blend*)state)->data)
136 return false;
137 break;
138 case CSO_DEPTH_STENCIL_ALPHA:
139 if (ctx->depth_stencil == ((struct cso_depth_stencil_alpha*)state)->data ||
140 ctx->depth_stencil_saved == ((struct cso_depth_stencil_alpha*)state)->data)
141 return false;
142 break;
143 case CSO_RASTERIZER:
144 if (ctx->rasterizer == ((struct cso_rasterizer*)state)->data ||
145 ctx->rasterizer_saved == ((struct cso_rasterizer*)state)->data)
146 return false;
147 break;
148 case CSO_VELEMENTS:
149 if (ctx->velements == ((struct cso_velements*)state)->data ||
150 ctx->velements_saved == ((struct cso_velements*)state)->data)
151 return false;
152 break;
153 case CSO_SAMPLER:
154 /* nothing to do for samplers */
155 break;
156 default:
157 assert(0);
158 }
159
160 cso_delete_state(ctx->base.pipe, state, type);
161 return true;
162 }
163
164
/**
 * Hash-table sanitize callback: when the table grows toward/past
 * \p max_size, delete roughly a quarter of the entries (plus any amount
 * over the limit).  For sampler tables, currently-bound and saved
 * samplers are temporarily pulled out of the table so they survive.
 */
static inline void
sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
              int max_size, void *user_data)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)user_data;
   /* if we're approach the maximum size, remove fourth of the entries
    * otherwise every subsequent call will go through the same */
   const int hash_size = cso_hash_size(hash);
   const int max_entries = (max_size > hash_size) ? max_size : hash_size;
   int to_remove = (max_size < max_entries) * max_entries/4;
   struct cso_sampler **samplers_to_restore = NULL;
   unsigned to_restore = 0;

   if (hash_size > max_size)
      to_remove += hash_size - max_size;

   if (to_remove == 0)
      return;

   if (type == CSO_SAMPLER) {
      /* Worst case: all per-stage samplers plus the two saved sets.
       * NOTE(review): the MALLOC result is used without a NULL check —
       * TODO confirm OOM is acceptable to ignore here.
       */
      samplers_to_restore = MALLOC((PIPE_SHADER_MESH_TYPES + 2) * PIPE_MAX_SAMPLERS *
                                   sizeof(*samplers_to_restore));

      /* Temporarily remove currently bound sampler states from the hash
       * table, to prevent them from being deleted
       */
      for (int i = 0; i < PIPE_SHADER_MESH_TYPES; i++) {
         for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
            struct cso_sampler *sampler = ctx->samplers[i].cso_samplers[j];

            if (sampler && cso_hash_take(hash, sampler->hash_key))
               samplers_to_restore[to_restore++] = sampler;
         }
      }
      for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
         struct cso_sampler *sampler = ctx->fragment_samplers_saved.cso_samplers[j];

         if (sampler && cso_hash_take(hash, sampler->hash_key))
            samplers_to_restore[to_restore++] = sampler;
      }
      for (int j = 0; j < PIPE_MAX_SAMPLERS; j++) {
         struct cso_sampler *sampler = ctx->compute_samplers_saved.cso_samplers[j];

         if (sampler && cso_hash_take(hash, sampler->hash_key))
            samplers_to_restore[to_restore++] = sampler;
      }
   }

   struct cso_hash_iter iter = cso_hash_first_node(hash);
   while (to_remove) {
      /*remove elements until we're good */
      /*fixme: currently we pick the nodes to remove at random*/
      void *cso = cso_hash_iter_data(iter);

      if (!cso)
         break;

      /* delete_cso() refuses to delete bound/saved objects; skip those. */
      if (delete_cso(ctx, cso, type)) {
         iter = cso_hash_erase(hash, iter);
         --to_remove;
      } else {
         iter = cso_hash_iter_next(iter);
      }
   }

   if (type == CSO_SAMPLER) {
      /* Put currently bound sampler states back into the hash table */
      while (to_restore--) {
         struct cso_sampler *sampler = samplers_to_restore[to_restore];

         cso_hash_insert(hash, sampler->hash_key, sampler);
      }

      FREE(samplers_to_restore);
   }
}
241
242
243 static void
cso_init_vbuf(struct cso_context_priv * cso,unsigned flags)244 cso_init_vbuf(struct cso_context_priv *cso, unsigned flags)
245 {
246 struct u_vbuf_caps caps;
247 bool uses_user_vertex_buffers = !(flags & CSO_NO_USER_VERTEX_BUFFERS);
248 bool needs64b = !(flags & CSO_NO_64B_VERTEX_BUFFERS);
249
250 u_vbuf_get_caps(cso->base.pipe->screen, &caps, needs64b);
251
252 /* Enable u_vbuf if needed. */
253 if (caps.fallback_always ||
254 (uses_user_vertex_buffers &&
255 caps.fallback_only_for_user_vbuffers)) {
256 assert(!cso->base.pipe->vbuf);
257 cso->vbuf = u_vbuf_create(cso->base.pipe, &caps);
258 cso->base.pipe->vbuf = cso->vbuf;
259 cso->always_use_vbuf = caps.fallback_always;
260 cso->vbuf_current = cso->base.pipe->vbuf =
261 caps.fallback_always ? cso->vbuf : NULL;
262 }
263 }
264
265 static void
cso_draw_vbo_default(struct pipe_context * pipe,const struct pipe_draw_info * info,unsigned drawid_offset,const struct pipe_draw_indirect_info * indirect,const struct pipe_draw_start_count_bias * draws,unsigned num_draws)266 cso_draw_vbo_default(struct pipe_context *pipe,
267 const struct pipe_draw_info *info,
268 unsigned drawid_offset,
269 const struct pipe_draw_indirect_info *indirect,
270 const struct pipe_draw_start_count_bias *draws,
271 unsigned num_draws)
272 {
273 if (pipe->vbuf)
274 u_vbuf_draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
275 else
276 pipe->draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
277 }
278
279 struct cso_context *
cso_create_context(struct pipe_context * pipe,unsigned flags)280 cso_create_context(struct pipe_context *pipe, unsigned flags)
281 {
282 struct cso_context_priv *ctx = CALLOC_STRUCT(cso_context_priv);
283 if (!ctx)
284 return NULL;
285
286 cso_cache_init(&ctx->cache, pipe);
287 cso_cache_set_sanitize_callback(&ctx->cache, sanitize_hash, ctx);
288
289 ctx->base.pipe = pipe;
290 ctx->sample_mask = ~0;
291
292 if (!(flags & CSO_NO_VBUF))
293 cso_init_vbuf(ctx, flags);
294
295 /* Only drivers using u_threaded_context benefit from the direct call.
296 * This is because drivers can change draw_vbo, but u_threaded_context
297 * never changes it.
298 */
299 if (pipe->draw_vbo == tc_draw_vbo) {
300 if (ctx->vbuf_current)
301 ctx->base.draw_vbo = u_vbuf_draw_vbo;
302 else
303 ctx->base.draw_vbo = pipe->draw_vbo;
304 } else if (ctx->always_use_vbuf) {
305 ctx->base.draw_vbo = u_vbuf_draw_vbo;
306 } else {
307 ctx->base.draw_vbo = cso_draw_vbo_default;
308 }
309
310 /* Enable for testing: */
311 if (0) cso_set_maximum_cache_size(&ctx->cache, 4);
312
313 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY,
314 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
315 ctx->has_geometry_shader = true;
316 }
317 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL,
318 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
319 ctx->has_tessellation = true;
320 }
321 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
322 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
323 int supported_irs =
324 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_COMPUTE,
325 PIPE_SHADER_CAP_SUPPORTED_IRS);
326 if (supported_irs & ((1 << PIPE_SHADER_IR_TGSI) |
327 (1 << PIPE_SHADER_IR_NIR))) {
328 ctx->has_compute_shader = true;
329 }
330 }
331 if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_MESH,
332 PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
333 ctx->has_task_mesh_shader = true;
334 }
335 if (pipe->screen->get_param(pipe->screen,
336 PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) {
337 ctx->has_streamout = true;
338 }
339
340 if (pipe->screen->get_param(pipe->screen,
341 PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK) &
342 PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_FREEDRENO)
343 ctx->sampler_format = true;
344
345 ctx->max_fs_samplerviews =
346 pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_FRAGMENT,
347 PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
348
349 ctx->max_sampler_seen = -1;
350 return &ctx->base;
351 }
352
353
354 void
cso_unbind_context(struct cso_context * cso)355 cso_unbind_context(struct cso_context *cso)
356 {
357 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
358 unsigned i;
359
360 bool dumping = trace_dumping_enabled_locked();
361 if (dumping)
362 trace_dumping_stop_locked();
363 if (ctx->base.pipe) {
364 ctx->base.pipe->bind_blend_state(ctx->base.pipe, NULL);
365 ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, NULL);
366
367 {
368 static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
369 static struct pipe_shader_buffer ssbos[PIPE_MAX_SHADER_BUFFERS] = { 0 };
370 static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
371 struct pipe_screen *scr = ctx->base.pipe->screen;
372 enum pipe_shader_type sh;
373 for (sh = 0; sh < PIPE_SHADER_MESH_TYPES; sh++) {
374 switch (sh) {
375 case PIPE_SHADER_GEOMETRY:
376 if (!ctx->has_geometry_shader)
377 continue;
378 break;
379 case PIPE_SHADER_TESS_CTRL:
380 case PIPE_SHADER_TESS_EVAL:
381 if (!ctx->has_tessellation)
382 continue;
383 break;
384 case PIPE_SHADER_COMPUTE:
385 if (!ctx->has_compute_shader)
386 continue;
387 break;
388 case PIPE_SHADER_MESH:
389 case PIPE_SHADER_TASK:
390 if (!ctx->has_task_mesh_shader)
391 continue;
392 break;
393 default:
394 break;
395 }
396
397 int maxsam = scr->get_shader_param(scr, sh,
398 PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
399 int maxview = scr->get_shader_param(scr, sh,
400 PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
401 int maxssbo = scr->get_shader_param(scr, sh,
402 PIPE_SHADER_CAP_MAX_SHADER_BUFFERS);
403 int maxcb = scr->get_shader_param(scr, sh,
404 PIPE_SHADER_CAP_MAX_CONST_BUFFERS);
405 int maximg = scr->get_shader_param(scr, sh,
406 PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
407 assert(maxsam <= PIPE_MAX_SAMPLERS);
408 assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
409 assert(maxssbo <= PIPE_MAX_SHADER_BUFFERS);
410 assert(maxcb <= PIPE_MAX_CONSTANT_BUFFERS);
411 assert(maximg <= PIPE_MAX_SHADER_IMAGES);
412 if (maxsam > 0) {
413 ctx->base.pipe->bind_sampler_states(ctx->base.pipe, sh, 0, maxsam, zeros);
414 }
415 if (maxview > 0) {
416 ctx->base.pipe->set_sampler_views(ctx->base.pipe, sh, 0, maxview, 0, false, views);
417 }
418 if (maxssbo > 0) {
419 ctx->base.pipe->set_shader_buffers(ctx->base.pipe, sh, 0, maxssbo, ssbos, 0);
420 }
421 if (maximg > 0) {
422 ctx->base.pipe->set_shader_images(ctx->base.pipe, sh, 0, 0, maximg, NULL);
423 }
424 for (int i = 0; i < maxcb; i++) {
425 ctx->base.pipe->set_constant_buffer(ctx->base.pipe, sh, i, false, NULL);
426 }
427 if (maxssbo > 0) {
428 ctx->pipe->set_shader_buffers(ctx->pipe, sh, 0, maxssbo, ssbos, 0);
429 }
430 if (maximg > 0) {
431 ctx->pipe->set_shader_images(ctx->pipe, sh, 0, maximg, NULL);
432 }
433 for (int i = 0; i < maxcb; i++) {
434 ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, NULL);
435 }
436 }
437 }
438
439 ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, NULL);
440 struct pipe_stencil_ref sr = {0};
441 ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
442 ctx->base.pipe->bind_fs_state(ctx->base.pipe, NULL);
443 ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
444 ctx->base.pipe->bind_vs_state(ctx->base.pipe, NULL);
445 ctx->base.pipe->set_constant_buffer(ctx->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
446 if (ctx->has_geometry_shader) {
447 ctx->base.pipe->bind_gs_state(ctx->base.pipe, NULL);
448 }
449 if (ctx->has_tessellation) {
450 ctx->base.pipe->bind_tcs_state(ctx->base.pipe, NULL);
451 ctx->base.pipe->bind_tes_state(ctx->base.pipe, NULL);
452 }
453 if (ctx->has_compute_shader) {
454 ctx->base.pipe->bind_compute_state(ctx->base.pipe, NULL);
455 }
456 if (ctx->has_task_mesh_shader) {
457 ctx->base.pipe->bind_ts_state(ctx->base.pipe, NULL);
458 ctx->base.pipe->bind_ms_state(ctx->base.pipe, NULL);
459 }
460 ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, NULL);
461
462 if (ctx->has_streamout)
463 ctx->base.pipe->set_stream_output_targets(ctx->base.pipe, 0, NULL, NULL);
464
465 struct pipe_framebuffer_state fb = {0};
466 ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &fb);
467 }
468
469 util_unreference_framebuffer_state(&ctx->fb);
470 util_unreference_framebuffer_state(&ctx->fb_saved);
471
472 for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
473 pipe_so_target_reference(&ctx->so_targets[i], NULL);
474 pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
475 }
476
477 memset(&ctx->samplers, 0, sizeof(ctx->samplers));
478 memset(&ctx->nr_so_targets, 0,
479 offsetof(struct cso_context_priv, cache)
480 - offsetof(struct cso_context_priv, nr_so_targets));
481 ctx->sample_mask = ~0;
482 /*
483 * If the cso context is reused (with the same pipe context),
484 * need to really make sure the context state doesn't get out of sync.
485 */
486 ctx->base.pipe->set_sample_mask(ctx->base.pipe, ctx->sample_mask);
487 if (ctx->base.pipe->set_min_samples)
488 ctx->base.pipe->set_min_samples(ctx->base.pipe, ctx->min_samples);
489 if (dumping)
490 trace_dumping_start_locked();
491 }
492
493
/**
 * Free the CSO context.
 */
void
cso_destroy_context(struct cso_context *cso)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   /* Unbind first so the cache can free objects still bound in the driver. */
   cso_unbind_context(cso);
   cso_cache_delete(&ctx->cache);

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);

   /* The pipe context may outlive us; make sure it no longer points here. */
   ctx->base.pipe->vbuf = NULL;
   FREE(ctx);
}
511
512
513 /* Those function will either find the state of the given template
514 * in the cache or they will create a new state from the given
515 * template, insert it in the cache and return it.
516 */
517
518 #define CSO_BLEND_KEY_SIZE_RT0 offsetof(struct pipe_blend_state, rt[1])
519 #define CSO_BLEND_KEY_SIZE_ALL_RT sizeof(struct pipe_blend_state)
520
521 /*
522 * If the driver returns 0 from the create method then they will assign
523 * the data member of the cso to be the template itself.
524 */
525
/**
 * Bind a blend state matching \p templ, creating and caching the driver
 * object on first use.
 *
 * When independent_blend_enable is false only rt[0] participates in the
 * hash key (CSO_BLEND_KEY_SIZE_RT0), so logically identical states share
 * one cached object.
 */
enum pipe_error
cso_set_blend(struct cso_context *cso,
              const struct pipe_blend_state *templ)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;

   if (templ->independent_blend_enable) {
      /* This is duplicated with the else block below because we want key_size
       * to be a literal constant, so that memcpy and the hash computation can
       * be inlined and unrolled.
       */
      hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_ALL_RT);
      iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
                                     templ, CSO_BLEND_KEY_SIZE_ALL_RT);
      key_size = CSO_BLEND_KEY_SIZE_ALL_RT;
   } else {
      hash_key = cso_construct_key(templ, CSO_BLEND_KEY_SIZE_RT0);
      iter = cso_find_state_template(&ctx->cache, hash_key, CSO_BLEND,
                                     templ, CSO_BLEND_KEY_SIZE_RT0);
      key_size = CSO_BLEND_KEY_SIZE_RT0;
   }

   if (cso_hash_iter_is_null(iter)) {
      /* Cache miss: create and insert a new driver blend state. */
      struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Zero first since only key_size bytes of the template are copied. */
      memset(&cso->state, 0, sizeof cso->state);
      memcpy(&cso->state, templ, key_size);
      cso->data = ctx->base.pipe->create_blend_state(ctx->base.pipe, &cso->state);

      iter = cso_insert_state(&ctx->cache, hash_key, CSO_BLEND, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   } else {
      handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
   }

   /* Bind only on change to avoid redundant driver calls. */
   if (ctx->blend != handle) {
      ctx->blend = handle;
      ctx->base.pipe->bind_blend_state(ctx->base.pipe, handle);
   }
   return PIPE_OK;
}
577
578
/* Push the current blend state onto the 1-deep save stack. */
static void
cso_save_blend(struct cso_context_priv *ctx)
{
   assert(!ctx->blend_saved);
   ctx->blend_saved = ctx->blend;
}
585
586
/* Pop the saved blend state, re-binding it only if it differs. */
static void
cso_restore_blend(struct cso_context_priv *ctx)
{
   if (ctx->blend != ctx->blend_saved) {
      ctx->blend = ctx->blend_saved;
      ctx->base.pipe->bind_blend_state(ctx->base.pipe, ctx->blend_saved);
   }
   ctx->blend_saved = NULL;
}
596
597
598 enum pipe_error
cso_set_depth_stencil_alpha(struct cso_context * cso,const struct pipe_depth_stencil_alpha_state * templ)599 cso_set_depth_stencil_alpha(struct cso_context *cso,
600 const struct pipe_depth_stencil_alpha_state *templ)
601 {
602 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
603 const unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state);
604 const unsigned hash_key = cso_construct_key(templ, key_size);
605 struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
606 hash_key,
607 CSO_DEPTH_STENCIL_ALPHA,
608 templ, key_size);
609 void *handle;
610
611 if (cso_hash_iter_is_null(iter)) {
612 struct cso_depth_stencil_alpha *cso =
613 MALLOC(sizeof(struct cso_depth_stencil_alpha));
614 if (!cso)
615 return PIPE_ERROR_OUT_OF_MEMORY;
616
617 memcpy(&cso->state, templ, sizeof(*templ));
618 cso->data = ctx->base.pipe->create_depth_stencil_alpha_state(ctx->base.pipe,
619 &cso->state);
620
621 iter = cso_insert_state(&ctx->cache, hash_key,
622 CSO_DEPTH_STENCIL_ALPHA, cso);
623 if (cso_hash_iter_is_null(iter)) {
624 FREE(cso);
625 return PIPE_ERROR_OUT_OF_MEMORY;
626 }
627
628 handle = cso->data;
629 } else {
630 handle = ((struct cso_depth_stencil_alpha *)
631 cso_hash_iter_data(iter))->data;
632 }
633
634 if (ctx->depth_stencil != handle) {
635 ctx->depth_stencil = handle;
636 ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe, handle);
637 }
638 return PIPE_OK;
639 }
640
641
/* Push the current DSA state onto the 1-deep save stack. */
static void
cso_save_depth_stencil_alpha(struct cso_context_priv *ctx)
{
   assert(!ctx->depth_stencil_saved);
   ctx->depth_stencil_saved = ctx->depth_stencil;
}
648
649
/* Pop the saved DSA state, re-binding it only if it differs. */
static void
cso_restore_depth_stencil_alpha(struct cso_context_priv *ctx)
{
   if (ctx->depth_stencil != ctx->depth_stencil_saved) {
      ctx->depth_stencil = ctx->depth_stencil_saved;
      ctx->base.pipe->bind_depth_stencil_alpha_state(ctx->base.pipe,
                                                     ctx->depth_stencil_saved);
   }
   ctx->depth_stencil_saved = NULL;
}
660
661
662 enum pipe_error
cso_set_rasterizer(struct cso_context * cso,const struct pipe_rasterizer_state * templ)663 cso_set_rasterizer(struct cso_context *cso,
664 const struct pipe_rasterizer_state *templ)
665 {
666 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
667 const unsigned key_size = sizeof(struct pipe_rasterizer_state);
668 const unsigned hash_key = cso_construct_key(templ, key_size);
669 struct cso_hash_iter iter = cso_find_state_template(&ctx->cache,
670 hash_key,
671 CSO_RASTERIZER,
672 templ, key_size);
673 void *handle = NULL;
674
675 /* We can't have both point_quad_rasterization (sprites) and point_smooth
676 * (round AA points) enabled at the same time.
677 */
678 assert(!(templ->point_quad_rasterization && templ->point_smooth));
679
680 if (cso_hash_iter_is_null(iter)) {
681 struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
682 if (!cso)
683 return PIPE_ERROR_OUT_OF_MEMORY;
684
685 memcpy(&cso->state, templ, sizeof(*templ));
686 cso->data = ctx->base.pipe->create_rasterizer_state(ctx->base.pipe, &cso->state);
687
688 iter = cso_insert_state(&ctx->cache, hash_key, CSO_RASTERIZER, cso);
689 if (cso_hash_iter_is_null(iter)) {
690 FREE(cso);
691 return PIPE_ERROR_OUT_OF_MEMORY;
692 }
693
694 handle = cso->data;
695 } else {
696 handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
697 }
698
699 if (ctx->rasterizer != handle) {
700 ctx->rasterizer = handle;
701 ctx->flatshade_first = templ->flatshade_first;
702 if (ctx->vbuf)
703 u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
704 ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, handle);
705 }
706 return PIPE_OK;
707 }
708
709
/* Push the current rasterizer state (and flatshade_first, which tracks
 * it) onto the 1-deep save stack.
 */
static void
cso_save_rasterizer(struct cso_context_priv *ctx)
{
   assert(!ctx->rasterizer_saved);
   ctx->rasterizer_saved = ctx->rasterizer;
   ctx->flatshade_first_saved = ctx->flatshade_first;
}
717
718
/* Pop the saved rasterizer state, re-binding and re-syncing u_vbuf's
 * flatshade_first only if the state actually changed.
 */
static void
cso_restore_rasterizer(struct cso_context_priv *ctx)
{
   if (ctx->rasterizer != ctx->rasterizer_saved) {
      ctx->rasterizer = ctx->rasterizer_saved;
      ctx->flatshade_first = ctx->flatshade_first_saved;
      if (ctx->vbuf)
         u_vbuf_set_flatshade_first(ctx->vbuf, ctx->flatshade_first);
      ctx->base.pipe->bind_rasterizer_state(ctx->base.pipe, ctx->rasterizer_saved);
   }
   ctx->rasterizer_saved = NULL;
}
731
732
733 void
cso_set_fragment_shader_handle(struct cso_context * cso,void * handle)734 cso_set_fragment_shader_handle(struct cso_context *cso, void *handle)
735 {
736 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
737
738 if (ctx->fragment_shader != handle) {
739 ctx->fragment_shader = handle;
740 ctx->base.pipe->bind_fs_state(ctx->base.pipe, handle);
741 }
742 }
743
744
/* Push the current fragment shader onto the 1-deep save stack. */
static void
cso_save_fragment_shader(struct cso_context_priv *ctx)
{
   assert(!ctx->fragment_shader_saved);
   ctx->fragment_shader_saved = ctx->fragment_shader;
}
751
752
/* Pop the saved fragment shader, re-binding it only if it differs. */
static void
cso_restore_fragment_shader(struct cso_context_priv *ctx)
{
   if (ctx->fragment_shader_saved != ctx->fragment_shader) {
      ctx->base.pipe->bind_fs_state(ctx->base.pipe, ctx->fragment_shader_saved);
      ctx->fragment_shader = ctx->fragment_shader_saved;
   }
   ctx->fragment_shader_saved = NULL;
}
762
763
764 void
cso_set_vertex_shader_handle(struct cso_context * cso,void * handle)765 cso_set_vertex_shader_handle(struct cso_context *cso, void *handle)
766 {
767 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
768
769 if (ctx->vertex_shader != handle) {
770 ctx->vertex_shader = handle;
771 ctx->base.pipe->bind_vs_state(ctx->base.pipe, handle);
772 }
773 }
774
775
/* Push the current vertex shader onto the 1-deep save stack. */
static void
cso_save_vertex_shader(struct cso_context_priv *ctx)
{
   assert(!ctx->vertex_shader_saved);
   ctx->vertex_shader_saved = ctx->vertex_shader;
}
782
783
/* Pop the saved vertex shader, re-binding it only if it differs. */
static void
cso_restore_vertex_shader(struct cso_context_priv *ctx)
{
   if (ctx->vertex_shader_saved != ctx->vertex_shader) {
      ctx->base.pipe->bind_vs_state(ctx->base.pipe, ctx->vertex_shader_saved);
      ctx->vertex_shader = ctx->vertex_shader_saved;
   }
   ctx->vertex_shader_saved = NULL;
}
793
794
/* Set the framebuffer state, copying (and taking references on) the new
 * state and calling the driver only when it actually changed.
 */
void
cso_set_framebuffer(struct cso_context *cso,
                    const struct pipe_framebuffer_state *fb)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
      util_copy_framebuffer_state(&ctx->fb, fb);
      ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, fb);
   }
}
806
807
/* Copy the current framebuffer state into the 1-deep save slot. */
static void
cso_save_framebuffer(struct cso_context_priv *ctx)
{
   util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
}
813
814
/* Restore the saved framebuffer state if it differs, then drop the
 * references held by the save slot.
 */
static void
cso_restore_framebuffer(struct cso_context_priv *ctx)
{
   if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
      util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
      ctx->base.pipe->set_framebuffer_state(ctx->base.pipe, &ctx->fb);
      util_unreference_framebuffer_state(&ctx->fb_saved);
   }
}
824
825
/* Set viewport 0, calling the driver only when the state changed. */
void
cso_set_viewport(struct cso_context *cso,
                 const struct pipe_viewport_state *vp)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
      ctx->vp = *vp;
      ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, vp);
   }
}
837
838
839 /**
840 * Setup viewport state for given width and height (position is always (0,0)).
841 * Invert the Y axis if 'invert' is true.
842 */
843 void
cso_set_viewport_dims(struct cso_context * ctx,float width,float height,bool invert)844 cso_set_viewport_dims(struct cso_context *ctx,
845 float width, float height, bool invert)
846 {
847 struct pipe_viewport_state vp;
848 vp.scale[0] = width * 0.5f;
849 vp.scale[1] = height * (invert ? -0.5f : 0.5f);
850 vp.scale[2] = 0.5f;
851 vp.translate[0] = 0.5f * width;
852 vp.translate[1] = 0.5f * height;
853 vp.translate[2] = 0.5f;
854 vp.swizzle_x = PIPE_VIEWPORT_SWIZZLE_POSITIVE_X;
855 vp.swizzle_y = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y;
856 vp.swizzle_z = PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z;
857 vp.swizzle_w = PIPE_VIEWPORT_SWIZZLE_POSITIVE_W;
858 cso_set_viewport(ctx, &vp);
859 }
860
861
/* Copy the current viewport into the 1-deep save slot. */
static void
cso_save_viewport(struct cso_context_priv *ctx)
{
   ctx->vp_saved = ctx->vp;
}
867
868
/* Restore the saved viewport, calling the driver only on change. */
static void
cso_restore_viewport(struct cso_context_priv *ctx)
{
   if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
      ctx->vp = ctx->vp_saved;
      ctx->base.pipe->set_viewport_states(ctx->base.pipe, 0, 1, &ctx->vp);
   }
}
877
878
/* Set the sample mask, calling the driver only on change. */
void
cso_set_sample_mask(struct cso_context *cso, unsigned sample_mask)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   if (ctx->sample_mask != sample_mask) {
      ctx->sample_mask = sample_mask;
      ctx->base.pipe->set_sample_mask(ctx->base.pipe, sample_mask);
   }
}
889
890
/* Copy the current sample mask into the 1-deep save slot. */
static void
cso_save_sample_mask(struct cso_context_priv *ctx)
{
   ctx->sample_mask_saved = ctx->sample_mask;
}
896
897
/* Restore the saved sample mask (no-op if unchanged). */
static void
cso_restore_sample_mask(struct cso_context_priv *ctx)
{
   cso_set_sample_mask(&ctx->base, ctx->sample_mask_saved);
}
903
904
/* Set min_samples, calling the driver only on change and only if the
 * driver implements the (optional) set_min_samples hook.
 */
void
cso_set_min_samples(struct cso_context *cso, unsigned min_samples)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   if (ctx->min_samples != min_samples && ctx->base.pipe->set_min_samples) {
      ctx->min_samples = min_samples;
      ctx->base.pipe->set_min_samples(ctx->base.pipe, min_samples);
   }
}
915
916
/* Copy the current min_samples into the 1-deep save slot. */
static void
cso_save_min_samples(struct cso_context_priv *ctx)
{
   ctx->min_samples_saved = ctx->min_samples;
}
922
923
/* Restore the saved min_samples (no-op if unchanged). */
static void
cso_restore_min_samples(struct cso_context_priv *ctx)
{
   cso_set_min_samples(&ctx->base, ctx->min_samples_saved);
}
929
930
/* Set the stencil reference values, calling the driver only on change. */
void
cso_set_stencil_ref(struct cso_context *cso,
                    const struct pipe_stencil_ref sr)
{
   struct cso_context_priv *ctx = (struct cso_context_priv *)cso;

   if (memcmp(&ctx->stencil_ref, &sr, sizeof(ctx->stencil_ref))) {
      ctx->stencil_ref = sr;
      ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
   }
}
942
943
/* Copy the current stencil ref into the 1-deep save slot. */
static void
cso_save_stencil_ref(struct cso_context_priv *ctx)
{
   ctx->stencil_ref_saved = ctx->stencil_ref;
}
949
950
951 static void
cso_restore_stencil_ref(struct cso_context_priv * ctx)952 cso_restore_stencil_ref(struct cso_context_priv *ctx)
953 {
954 if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved,
955 sizeof(ctx->stencil_ref))) {
956 ctx->stencil_ref = ctx->stencil_ref_saved;
957 ctx->base.pipe->set_stencil_ref(ctx->base.pipe, ctx->stencil_ref);
958 }
959 }
960
961
962 void
cso_set_render_condition(struct cso_context * cso,struct pipe_query * query,bool condition,enum pipe_render_cond_flag mode)963 cso_set_render_condition(struct cso_context *cso,
964 struct pipe_query *query,
965 bool condition,
966 enum pipe_render_cond_flag mode)
967 {
968 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
969 struct pipe_context *pipe = ctx->base.pipe;
970
971 if (ctx->render_condition != query ||
972 ctx->render_condition_mode != mode ||
973 ctx->render_condition_cond != condition) {
974 pipe->render_condition(pipe, query, condition, mode);
975 ctx->render_condition = query;
976 ctx->render_condition_cond = condition;
977 ctx->render_condition_mode = mode;
978 }
979 }
980
981
982 static void
cso_save_render_condition(struct cso_context_priv * ctx)983 cso_save_render_condition(struct cso_context_priv *ctx)
984 {
985 ctx->render_condition_saved = ctx->render_condition;
986 ctx->render_condition_cond_saved = ctx->render_condition_cond;
987 ctx->render_condition_mode_saved = ctx->render_condition_mode;
988 }
989
990
991 static void
cso_restore_render_condition(struct cso_context_priv * ctx)992 cso_restore_render_condition(struct cso_context_priv *ctx)
993 {
994 cso_set_render_condition(&ctx->base, ctx->render_condition_saved,
995 ctx->render_condition_cond_saved,
996 ctx->render_condition_mode_saved);
997 }
998
999
1000 void
cso_set_geometry_shader_handle(struct cso_context * cso,void * handle)1001 cso_set_geometry_shader_handle(struct cso_context *cso, void *handle)
1002 {
1003 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1004 assert(ctx->has_geometry_shader || !handle);
1005
1006 if (ctx->has_geometry_shader && ctx->geometry_shader != handle) {
1007 ctx->geometry_shader = handle;
1008 ctx->base.pipe->bind_gs_state(ctx->base.pipe, handle);
1009 }
1010 }
1011
1012
1013 static void
cso_save_geometry_shader(struct cso_context_priv * ctx)1014 cso_save_geometry_shader(struct cso_context_priv *ctx)
1015 {
1016 if (!ctx->has_geometry_shader) {
1017 return;
1018 }
1019
1020 assert(!ctx->geometry_shader_saved);
1021 ctx->geometry_shader_saved = ctx->geometry_shader;
1022 }
1023
1024
1025 static void
cso_restore_geometry_shader(struct cso_context_priv * ctx)1026 cso_restore_geometry_shader(struct cso_context_priv *ctx)
1027 {
1028 if (!ctx->has_geometry_shader) {
1029 return;
1030 }
1031
1032 if (ctx->geometry_shader_saved != ctx->geometry_shader) {
1033 ctx->base.pipe->bind_gs_state(ctx->base.pipe, ctx->geometry_shader_saved);
1034 ctx->geometry_shader = ctx->geometry_shader_saved;
1035 }
1036 ctx->geometry_shader_saved = NULL;
1037 }
1038
1039
1040 void
cso_set_tessctrl_shader_handle(struct cso_context * cso,void * handle)1041 cso_set_tessctrl_shader_handle(struct cso_context *cso, void *handle)
1042 {
1043 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1044 assert(ctx->has_tessellation || !handle);
1045
1046 if (ctx->has_tessellation && ctx->tessctrl_shader != handle) {
1047 ctx->tessctrl_shader = handle;
1048 ctx->base.pipe->bind_tcs_state(ctx->base.pipe, handle);
1049 }
1050 }
1051
1052
1053 static void
cso_save_tessctrl_shader(struct cso_context_priv * ctx)1054 cso_save_tessctrl_shader(struct cso_context_priv *ctx)
1055 {
1056 if (!ctx->has_tessellation) {
1057 return;
1058 }
1059
1060 assert(!ctx->tessctrl_shader_saved);
1061 ctx->tessctrl_shader_saved = ctx->tessctrl_shader;
1062 }
1063
1064
1065 static void
cso_restore_tessctrl_shader(struct cso_context_priv * ctx)1066 cso_restore_tessctrl_shader(struct cso_context_priv *ctx)
1067 {
1068 if (!ctx->has_tessellation) {
1069 return;
1070 }
1071
1072 if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) {
1073 ctx->base.pipe->bind_tcs_state(ctx->base.pipe, ctx->tessctrl_shader_saved);
1074 ctx->tessctrl_shader = ctx->tessctrl_shader_saved;
1075 }
1076 ctx->tessctrl_shader_saved = NULL;
1077 }
1078
1079
1080 void
cso_set_tesseval_shader_handle(struct cso_context * cso,void * handle)1081 cso_set_tesseval_shader_handle(struct cso_context *cso, void *handle)
1082 {
1083 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1084
1085 assert(ctx->has_tessellation || !handle);
1086
1087 if (ctx->has_tessellation && ctx->tesseval_shader != handle) {
1088 ctx->tesseval_shader = handle;
1089 ctx->base.pipe->bind_tes_state(ctx->base.pipe, handle);
1090 }
1091 }
1092
1093
1094 static void
cso_save_tesseval_shader(struct cso_context_priv * ctx)1095 cso_save_tesseval_shader(struct cso_context_priv *ctx)
1096 {
1097 if (!ctx->has_tessellation) {
1098 return;
1099 }
1100
1101 assert(!ctx->tesseval_shader_saved);
1102 ctx->tesseval_shader_saved = ctx->tesseval_shader;
1103 }
1104
1105
1106 static void
cso_restore_tesseval_shader(struct cso_context_priv * ctx)1107 cso_restore_tesseval_shader(struct cso_context_priv *ctx)
1108 {
1109 if (!ctx->has_tessellation) {
1110 return;
1111 }
1112
1113 if (ctx->tesseval_shader_saved != ctx->tesseval_shader) {
1114 ctx->base.pipe->bind_tes_state(ctx->base.pipe, ctx->tesseval_shader_saved);
1115 ctx->tesseval_shader = ctx->tesseval_shader_saved;
1116 }
1117 ctx->tesseval_shader_saved = NULL;
1118 }
1119
1120
1121 void
cso_set_compute_shader_handle(struct cso_context * cso,void * handle)1122 cso_set_compute_shader_handle(struct cso_context *cso, void *handle)
1123 {
1124 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1125 assert(ctx->has_compute_shader || !handle);
1126
1127 if (ctx->has_compute_shader && ctx->compute_shader != handle) {
1128 ctx->compute_shader = handle;
1129 ctx->base.pipe->bind_compute_state(ctx->base.pipe, handle);
1130 }
1131 }
1132
1133
1134 static void
cso_save_compute_shader(struct cso_context_priv * ctx)1135 cso_save_compute_shader(struct cso_context_priv *ctx)
1136 {
1137 if (!ctx->has_compute_shader) {
1138 return;
1139 }
1140
1141 assert(!ctx->compute_shader_saved);
1142 ctx->compute_shader_saved = ctx->compute_shader;
1143 }
1144
1145
1146 static void
cso_restore_compute_shader(struct cso_context_priv * ctx)1147 cso_restore_compute_shader(struct cso_context_priv *ctx)
1148 {
1149 if (!ctx->has_compute_shader) {
1150 return;
1151 }
1152
1153 if (ctx->compute_shader_saved != ctx->compute_shader) {
1154 ctx->base.pipe->bind_compute_state(ctx->base.pipe, ctx->compute_shader_saved);
1155 ctx->compute_shader = ctx->compute_shader_saved;
1156 }
1157 ctx->compute_shader_saved = NULL;
1158 }
1159
1160
1161 static void
cso_save_compute_samplers(struct cso_context_priv * ctx)1162 cso_save_compute_samplers(struct cso_context_priv *ctx)
1163 {
1164 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
1165 struct sampler_info *saved = &ctx->compute_samplers_saved;
1166
1167 memcpy(saved->cso_samplers, info->cso_samplers,
1168 sizeof(info->cso_samplers));
1169 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1170 }
1171
1172
1173 static void
cso_restore_compute_samplers(struct cso_context_priv * ctx)1174 cso_restore_compute_samplers(struct cso_context_priv *ctx)
1175 {
1176 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_COMPUTE];
1177 struct sampler_info *saved = &ctx->compute_samplers_saved;
1178
1179 memcpy(info->cso_samplers, saved->cso_samplers,
1180 sizeof(info->cso_samplers));
1181 memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1182
1183 for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1184 if (info->samplers[i]) {
1185 ctx->max_sampler_seen = i;
1186 break;
1187 }
1188 }
1189
1190 cso_single_sampler_done(&ctx->base, PIPE_SHADER_COMPUTE);
1191 }
1192
1193
1194 static void
cso_set_vertex_elements_direct(struct cso_context_priv * ctx,const struct cso_velems_state * velems)1195 cso_set_vertex_elements_direct(struct cso_context_priv *ctx,
1196 const struct cso_velems_state *velems)
1197 {
1198 /* Need to include the count into the stored state data too.
1199 * Otherwise first few count pipe_vertex_elements could be identical
1200 * even if count is different, and there's no guarantee the hash would
1201 * be different in that case neither.
1202 */
1203 const unsigned key_size =
1204 sizeof(struct pipe_vertex_element) * velems->count + sizeof(unsigned);
1205 const unsigned hash_key = cso_construct_key((void*)velems, key_size);
1206 struct cso_hash_iter iter =
1207 cso_find_state_template(&ctx->cache, hash_key, CSO_VELEMENTS,
1208 velems, key_size);
1209 void *handle;
1210
1211 if (cso_hash_iter_is_null(iter)) {
1212 struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
1213 if (!cso)
1214 return;
1215
1216 memcpy(&cso->state, velems, key_size);
1217
1218 /* Lower 64-bit vertex attributes. */
1219 unsigned new_count = velems->count;
1220 const struct pipe_vertex_element *new_elems = velems->velems;
1221 struct pipe_vertex_element tmp[PIPE_MAX_ATTRIBS];
1222 util_lower_uint64_vertex_elements(&new_elems, &new_count, tmp);
1223
1224 cso->data = ctx->base.pipe->create_vertex_elements_state(ctx->base.pipe, new_count,
1225 new_elems);
1226
1227 iter = cso_insert_state(&ctx->cache, hash_key, CSO_VELEMENTS, cso);
1228 if (cso_hash_iter_is_null(iter)) {
1229 FREE(cso);
1230 return;
1231 }
1232
1233 handle = cso->data;
1234 } else {
1235 handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
1236 }
1237
1238 if (ctx->velements != handle) {
1239 ctx->velements = handle;
1240 ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, handle);
1241 }
1242 }
1243
1244
1245 enum pipe_error
cso_set_vertex_elements(struct cso_context * cso,const struct cso_velems_state * velems)1246 cso_set_vertex_elements(struct cso_context *cso,
1247 const struct cso_velems_state *velems)
1248 {
1249 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1250 struct u_vbuf *vbuf = ctx->vbuf_current;
1251
1252 if (vbuf) {
1253 u_vbuf_set_vertex_elements(vbuf, velems);
1254 return PIPE_OK;
1255 }
1256
1257 cso_set_vertex_elements_direct(ctx, velems);
1258 return PIPE_OK;
1259 }
1260
1261
1262 static void
cso_save_vertex_elements(struct cso_context_priv * ctx)1263 cso_save_vertex_elements(struct cso_context_priv *ctx)
1264 {
1265 struct u_vbuf *vbuf = ctx->vbuf_current;
1266
1267 if (vbuf) {
1268 u_vbuf_save_vertex_elements(vbuf);
1269 return;
1270 }
1271
1272 assert(!ctx->velements_saved);
1273 ctx->velements_saved = ctx->velements;
1274 }
1275
1276
1277 static void
cso_restore_vertex_elements(struct cso_context_priv * ctx)1278 cso_restore_vertex_elements(struct cso_context_priv *ctx)
1279 {
1280 struct u_vbuf *vbuf = ctx->vbuf_current;
1281
1282 if (vbuf) {
1283 u_vbuf_restore_vertex_elements(vbuf);
1284 return;
1285 }
1286
1287 if (ctx->velements != ctx->velements_saved) {
1288 ctx->velements = ctx->velements_saved;
1289 ctx->base.pipe->bind_vertex_elements_state(ctx->base.pipe, ctx->velements_saved);
1290 }
1291 ctx->velements_saved = NULL;
1292 }
1293
1294 /* vertex buffers */
1295
1296 void
cso_set_vertex_buffers(struct cso_context * cso,unsigned count,bool take_ownership,const struct pipe_vertex_buffer * buffers)1297 cso_set_vertex_buffers(struct cso_context *cso,
1298 unsigned count,
1299 bool take_ownership,
1300 const struct pipe_vertex_buffer *buffers)
1301 {
1302 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1303 struct u_vbuf *vbuf = ctx->vbuf_current;
1304
1305 if (vbuf) {
1306 u_vbuf_set_vertex_buffers(vbuf, count, take_ownership, buffers);
1307 return;
1308 }
1309
1310 util_set_vertex_buffers(ctx->base.pipe, count, take_ownership, buffers);
1311 }
1312
1313
1314 /**
1315 * Set vertex buffers and vertex elements. Skip u_vbuf if it's only needed
1316 * for user vertex buffers and user vertex buffers are not set by this call.
1317 * u_vbuf will be disabled. To re-enable u_vbuf, call this function again.
1318 *
1319 * Skipping u_vbuf decreases CPU overhead for draw calls that don't need it,
1320 * such as VBOs, glBegin/End, and display lists.
1321 *
1322 * Internal operations that do "save states, draw, restore states" shouldn't
1323 * use this, because the states are only saved in either cso_context or
1324 * u_vbuf, not both.
1325 */
1326 void
cso_set_vertex_buffers_and_elements(struct cso_context * cso,const struct cso_velems_state * velems,unsigned vb_count,bool uses_user_vertex_buffers,const struct pipe_vertex_buffer * vbuffers)1327 cso_set_vertex_buffers_and_elements(struct cso_context *cso,
1328 const struct cso_velems_state *velems,
1329 unsigned vb_count,
1330 bool uses_user_vertex_buffers,
1331 const struct pipe_vertex_buffer *vbuffers)
1332 {
1333 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1334 struct u_vbuf *vbuf = ctx->vbuf;
1335 struct pipe_context *pipe = ctx->base.pipe;
1336
1337 if (vbuf && (ctx->always_use_vbuf || uses_user_vertex_buffers)) {
1338 if (!ctx->vbuf_current) {
1339 /* Unset this to make sure the CSO is re-bound on the next use. */
1340 ctx->velements = NULL;
1341 ctx->vbuf_current = pipe->vbuf = vbuf;
1342 if (pipe->draw_vbo == tc_draw_vbo)
1343 ctx->base.draw_vbo = u_vbuf_draw_vbo;
1344 }
1345
1346 u_vbuf_set_vertex_elements(vbuf, velems);
1347 u_vbuf_set_vertex_buffers(vbuf, vb_count, true, vbuffers);
1348 return;
1349 }
1350
1351 if (ctx->vbuf_current) {
1352 /* Unset this to make sure the CSO is re-bound on the next use. */
1353 u_vbuf_unset_vertex_elements(vbuf);
1354 ctx->vbuf_current = pipe->vbuf = NULL;
1355 if (pipe->draw_vbo == tc_draw_vbo)
1356 ctx->base.draw_vbo = pipe->draw_vbo;
1357 }
1358
1359 cso_set_vertex_elements_direct(ctx, velems);
1360 pipe->set_vertex_buffers(pipe, vb_count, vbuffers);
1361 }
1362
1363
1364 ALWAYS_INLINE static struct cso_sampler *
set_sampler(struct cso_context_priv * ctx,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ,size_t key_size)1365 set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
1366 unsigned idx, const struct pipe_sampler_state *templ,
1367 size_t key_size)
1368 {
1369 unsigned hash_key = cso_construct_key(templ, key_size);
1370 struct cso_sampler *cso;
1371 struct cso_hash_iter iter =
1372 cso_find_state_template(&ctx->cache,
1373 hash_key, CSO_SAMPLER,
1374 templ, key_size);
1375
1376 if (cso_hash_iter_is_null(iter)) {
1377 cso = MALLOC(sizeof(struct cso_sampler));
1378 if (!cso)
1379 return false;
1380
1381 memcpy(&cso->state, templ, sizeof(*templ));
1382 cso->data = ctx->base.pipe->create_sampler_state(ctx->base.pipe, &cso->state);
1383 cso->hash_key = hash_key;
1384
1385 iter = cso_insert_state(&ctx->cache, hash_key, CSO_SAMPLER, cso);
1386 if (cso_hash_iter_is_null(iter)) {
1387 FREE(cso);
1388 return false;
1389 }
1390 } else {
1391 cso = cso_hash_iter_data(iter);
1392 }
1393 return cso;
1394 }
1395
1396
1397 ALWAYS_INLINE static bool
cso_set_sampler(struct cso_context_priv * ctx,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ,size_t size)1398 cso_set_sampler(struct cso_context_priv *ctx, enum pipe_shader_type shader_stage,
1399 unsigned idx, const struct pipe_sampler_state *templ,
1400 size_t size)
1401 {
1402 struct cso_sampler *cso = set_sampler(ctx, shader_stage, idx, templ, size);
1403 ctx->samplers[shader_stage].cso_samplers[idx] = cso;
1404 ctx->samplers[shader_stage].samplers[idx] = cso->data;
1405 return true;
1406 }
1407
1408
1409 void
cso_single_sampler(struct cso_context * cso,enum pipe_shader_type shader_stage,unsigned idx,const struct pipe_sampler_state * templ)1410 cso_single_sampler(struct cso_context *cso, enum pipe_shader_type shader_stage,
1411 unsigned idx, const struct pipe_sampler_state *templ)
1412 {
1413 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1414
1415 /* The reasons both blocks are duplicated is that we want the size parameter
1416 * to be a constant expression to inline and unroll memcmp and hash key
1417 * computations.
1418 */
1419 if (ctx->sampler_format) {
1420 if (cso_set_sampler(ctx, shader_stage, idx, templ,
1421 sizeof(struct pipe_sampler_state)))
1422 ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1423 } else {
1424 if (cso_set_sampler(ctx, shader_stage, idx, templ,
1425 offsetof(struct pipe_sampler_state, border_color_format)))
1426 ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, (int)idx);
1427 }
1428 }
1429
1430
1431 /**
1432 * Send staged sampler state to the driver.
1433 */
1434 void
cso_single_sampler_done(struct cso_context * cso,enum pipe_shader_type shader_stage)1435 cso_single_sampler_done(struct cso_context *cso,
1436 enum pipe_shader_type shader_stage)
1437 {
1438 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1439 struct sampler_info *info = &ctx->samplers[shader_stage];
1440
1441 if (ctx->max_sampler_seen == -1)
1442 return;
1443
1444 ctx->base.pipe->bind_sampler_states(ctx->base.pipe, shader_stage, 0,
1445 ctx->max_sampler_seen + 1,
1446 info->samplers);
1447 ctx->max_sampler_seen = -1;
1448 }
1449
1450
1451 ALWAYS_INLINE static int
set_samplers(struct cso_context_priv * ctx,enum pipe_shader_type shader_stage,unsigned nr,const struct pipe_sampler_state ** templates,size_t key_size)1452 set_samplers(struct cso_context_priv *ctx,
1453 enum pipe_shader_type shader_stage,
1454 unsigned nr,
1455 const struct pipe_sampler_state **templates,
1456 size_t key_size)
1457 {
1458 int last = -1;
1459 for (unsigned i = 0; i < nr; i++) {
1460 if (!templates[i])
1461 continue;
1462
1463 /* Reuse the same sampler state CSO if 2 consecutive sampler states
1464 * are identical.
1465 *
1466 * The trivial case where both pointers are equal doesn't occur in
1467 * frequented codepaths.
1468 *
1469 * Reuse rate:
1470 * - Borderlands 2: 55%
1471 * - Hitman: 65%
1472 * - Rocket League: 75%
1473 * - Tomb Raider: 50-65%
1474 * - XCOM 2: 55%
1475 */
1476 if (last >= 0 &&
1477 !memcmp(templates[i], templates[last],
1478 key_size)) {
1479 ctx->samplers[shader_stage].cso_samplers[i] =
1480 ctx->samplers[shader_stage].cso_samplers[last];
1481 ctx->samplers[shader_stage].samplers[i] =
1482 ctx->samplers[shader_stage].samplers[last];
1483 } else {
1484 /* Look up the sampler state CSO. */
1485 cso_set_sampler(ctx, shader_stage, i, templates[i], key_size);
1486 }
1487
1488 last = i;
1489 }
1490 return last;
1491 }
1492
1493
/*
 * Set all sampler states at once.  A failure on one slot does not stop
 * the loop, so as many samplers as possible are still set.  (The function
 * returns void; it does not report per-slot errors.)
 */
1499 void
cso_set_samplers(struct cso_context * cso,enum pipe_shader_type shader_stage,unsigned nr,const struct pipe_sampler_state ** templates)1500 cso_set_samplers(struct cso_context *cso,
1501 enum pipe_shader_type shader_stage,
1502 unsigned nr,
1503 const struct pipe_sampler_state **templates)
1504 {
1505 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1506 int last;
1507
1508 /* ensure sampler size is a constant for memcmp */
1509 if (ctx->sampler_format) {
1510 last = set_samplers(ctx, shader_stage, nr, templates,
1511 sizeof(struct pipe_sampler_state));
1512 } else {
1513 last = set_samplers(ctx, shader_stage, nr, templates,
1514 offsetof(struct pipe_sampler_state, border_color_format));
1515 }
1516
1517 ctx->max_sampler_seen = MAX2(ctx->max_sampler_seen, last);
1518 cso_single_sampler_done(&ctx->base, shader_stage);
1519 }
1520
1521
1522 static void
cso_save_fragment_samplers(struct cso_context_priv * ctx)1523 cso_save_fragment_samplers(struct cso_context_priv *ctx)
1524 {
1525 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1526 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1527
1528 memcpy(saved->cso_samplers, info->cso_samplers,
1529 sizeof(info->cso_samplers));
1530 memcpy(saved->samplers, info->samplers, sizeof(info->samplers));
1531 }
1532
1533
1534 static void
cso_restore_fragment_samplers(struct cso_context_priv * ctx)1535 cso_restore_fragment_samplers(struct cso_context_priv *ctx)
1536 {
1537 struct sampler_info *info = &ctx->samplers[PIPE_SHADER_FRAGMENT];
1538 struct sampler_info *saved = &ctx->fragment_samplers_saved;
1539
1540 memcpy(info->cso_samplers, saved->cso_samplers,
1541 sizeof(info->cso_samplers));
1542 memcpy(info->samplers, saved->samplers, sizeof(info->samplers));
1543
1544 for (int i = PIPE_MAX_SAMPLERS - 1; i >= 0; i--) {
1545 if (info->samplers[i]) {
1546 ctx->max_sampler_seen = i;
1547 break;
1548 }
1549 }
1550
1551 cso_single_sampler_done(&ctx->base, PIPE_SHADER_FRAGMENT);
1552 }
1553
1554
1555 void
cso_set_stream_outputs(struct cso_context * cso,unsigned num_targets,struct pipe_stream_output_target ** targets,const unsigned * offsets)1556 cso_set_stream_outputs(struct cso_context *cso,
1557 unsigned num_targets,
1558 struct pipe_stream_output_target **targets,
1559 const unsigned *offsets)
1560 {
1561 struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
1562 struct pipe_context *pipe = ctx->base.pipe;
1563 unsigned i;
1564
1565 if (!ctx->has_streamout) {
1566 assert(num_targets == 0);
1567 return;
1568 }
1569
1570 if (ctx->nr_so_targets == 0 && num_targets == 0) {
1571 /* Nothing to do. */
1572 return;
1573 }
1574
1575 /* reference new targets */
1576 for (i = 0; i < num_targets; i++) {
1577 pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
1578 }
1579 /* unref extra old targets, if any */
1580 for (; i < ctx->nr_so_targets; i++) {
1581 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1582 }
1583
1584 pipe->set_stream_output_targets(pipe, num_targets, targets,
1585 offsets);
1586 ctx->nr_so_targets = num_targets;
1587 }
1588
1589
1590 static void
cso_save_stream_outputs(struct cso_context_priv * ctx)1591 cso_save_stream_outputs(struct cso_context_priv *ctx)
1592 {
1593 if (!ctx->has_streamout) {
1594 return;
1595 }
1596
1597 ctx->nr_so_targets_saved = ctx->nr_so_targets;
1598
1599 for (unsigned i = 0; i < ctx->nr_so_targets; i++) {
1600 assert(!ctx->so_targets_saved[i]);
1601 pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]);
1602 }
1603 }
1604
1605
1606 static void
cso_restore_stream_outputs(struct cso_context_priv * ctx)1607 cso_restore_stream_outputs(struct cso_context_priv *ctx)
1608 {
1609 struct pipe_context *pipe = ctx->base.pipe;
1610 unsigned i;
1611 unsigned offset[PIPE_MAX_SO_BUFFERS];
1612
1613 if (!ctx->has_streamout) {
1614 return;
1615 }
1616
1617 if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
1618 /* Nothing to do. */
1619 return;
1620 }
1621
1622 assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
1623 for (i = 0; i < ctx->nr_so_targets_saved; i++) {
1624 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1625 /* move the reference from one pointer to another */
1626 ctx->so_targets[i] = ctx->so_targets_saved[i];
1627 ctx->so_targets_saved[i] = NULL;
1628 /* -1 means append */
1629 offset[i] = (unsigned)-1;
1630 }
1631 for (; i < ctx->nr_so_targets; i++) {
1632 pipe_so_target_reference(&ctx->so_targets[i], NULL);
1633 }
1634
1635 pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
1636 ctx->so_targets, offset);
1637
1638 ctx->nr_so_targets = ctx->nr_so_targets_saved;
1639 ctx->nr_so_targets_saved = 0;
1640 }
1641
1642
1643 /**
1644 * Save all the CSO state items specified by the state_mask bitmask
1645 * of CSO_BIT_x flags.
1646 */
1647 void
cso_save_state(struct cso_context * ctx,unsigned state_mask)1648 cso_save_state(struct cso_context *ctx, unsigned state_mask)
1649 {
1650 struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1651 assert(cso->saved_state == 0);
1652
1653 cso->saved_state = state_mask;
1654
1655 if (state_mask & CSO_BIT_BLEND)
1656 cso_save_blend(cso);
1657 if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1658 cso_save_depth_stencil_alpha(cso);
1659 if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1660 cso_save_fragment_samplers(cso);
1661 if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1662 cso_save_fragment_shader(cso);
1663 if (state_mask & CSO_BIT_FRAMEBUFFER)
1664 cso_save_framebuffer(cso);
1665 if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1666 cso_save_geometry_shader(cso);
1667 if (state_mask & CSO_BIT_MIN_SAMPLES)
1668 cso_save_min_samples(cso);
1669 if (state_mask & CSO_BIT_RASTERIZER)
1670 cso_save_rasterizer(cso);
1671 if (state_mask & CSO_BIT_RENDER_CONDITION)
1672 cso_save_render_condition(cso);
1673 if (state_mask & CSO_BIT_SAMPLE_MASK)
1674 cso_save_sample_mask(cso);
1675 if (state_mask & CSO_BIT_STENCIL_REF)
1676 cso_save_stencil_ref(cso);
1677 if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1678 cso_save_stream_outputs(cso);
1679 if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1680 cso_save_tessctrl_shader(cso);
1681 if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1682 cso_save_tesseval_shader(cso);
1683 if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1684 cso_save_vertex_elements(cso);
1685 if (state_mask & CSO_BIT_VERTEX_SHADER)
1686 cso_save_vertex_shader(cso);
1687 if (state_mask & CSO_BIT_VIEWPORT)
1688 cso_save_viewport(cso);
1689 if (state_mask & CSO_BIT_PAUSE_QUERIES)
1690 cso->base.pipe->set_active_query_state(cso->base.pipe, false);
1691 }
1692
1693
1694 /**
1695 * Restore the state which was saved by cso_save_state().
1696 */
1697 void
cso_restore_state(struct cso_context * ctx,unsigned unbind)1698 cso_restore_state(struct cso_context *ctx, unsigned unbind)
1699 {
1700 struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1701 unsigned state_mask = cso->saved_state;
1702
1703 assert(state_mask);
1704
1705 if (state_mask & CSO_BIT_DEPTH_STENCIL_ALPHA)
1706 cso_restore_depth_stencil_alpha(cso);
1707 if (state_mask & CSO_BIT_STENCIL_REF)
1708 cso_restore_stencil_ref(cso);
1709 if (state_mask & CSO_BIT_FRAGMENT_SHADER)
1710 cso_restore_fragment_shader(cso);
1711 if (state_mask & CSO_BIT_GEOMETRY_SHADER)
1712 cso_restore_geometry_shader(cso);
1713 if (state_mask & CSO_BIT_TESSEVAL_SHADER)
1714 cso_restore_tesseval_shader(cso);
1715 if (state_mask & CSO_BIT_TESSCTRL_SHADER)
1716 cso_restore_tessctrl_shader(cso);
1717 if (state_mask & CSO_BIT_VERTEX_SHADER)
1718 cso_restore_vertex_shader(cso);
1719 if (unbind & CSO_UNBIND_FS_SAMPLERVIEWS)
1720 cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
1721 cso->max_fs_samplerviews, false, NULL);
1722 if (unbind & CSO_UNBIND_FS_SAMPLERVIEW0)
1723 cso->base.pipe->set_sampler_views(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0,
1724 1, false, NULL);
1725 if (state_mask & CSO_BIT_FRAGMENT_SAMPLERS)
1726 cso_restore_fragment_samplers(cso);
1727 if (unbind & CSO_UNBIND_FS_IMAGE0)
1728 cso->base.pipe->set_shader_images(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, 0, 1, NULL);
1729 if (state_mask & CSO_BIT_FRAMEBUFFER)
1730 cso_restore_framebuffer(cso);
1731 if (state_mask & CSO_BIT_BLEND)
1732 cso_restore_blend(cso);
1733 if (state_mask & CSO_BIT_RASTERIZER)
1734 cso_restore_rasterizer(cso);
1735 if (state_mask & CSO_BIT_MIN_SAMPLES)
1736 cso_restore_min_samples(cso);
1737 if (state_mask & CSO_BIT_RENDER_CONDITION)
1738 cso_restore_render_condition(cso);
1739 if (state_mask & CSO_BIT_SAMPLE_MASK)
1740 cso_restore_sample_mask(cso);
1741 if (state_mask & CSO_BIT_VIEWPORT)
1742 cso_restore_viewport(cso);
1743 if (unbind & CSO_UNBIND_VS_CONSTANTS)
1744 cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
1745 if (unbind & CSO_UNBIND_FS_CONSTANTS)
1746 cso->base.pipe->set_constant_buffer(cso->base.pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
1747 if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
1748 cso_restore_vertex_elements(cso);
1749 if (state_mask & CSO_BIT_STREAM_OUTPUTS)
1750 cso_restore_stream_outputs(cso);
1751 if (state_mask & CSO_BIT_PAUSE_QUERIES)
1752 cso->base.pipe->set_active_query_state(cso->base.pipe, true);
1753
1754 cso->saved_state = 0;
1755 }
1756
1757
1758 /**
1759 * Save all the CSO state items specified by the state_mask bitmask
1760 * of CSO_BIT_COMPUTE_x flags.
1761 */
1762 void
cso_save_compute_state(struct cso_context * ctx,unsigned state_mask)1763 cso_save_compute_state(struct cso_context *ctx, unsigned state_mask)
1764 {
1765 struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1766 assert(cso->saved_compute_state == 0);
1767
1768 cso->saved_compute_state = state_mask;
1769
1770 if (state_mask & CSO_BIT_COMPUTE_SHADER)
1771 cso_save_compute_shader(cso);
1772
1773 if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1774 cso_save_compute_samplers(cso);
1775 }
1776
1777
1778 /**
1779 * Restore the state which was saved by cso_save_compute_state().
1780 */
1781 void
cso_restore_compute_state(struct cso_context * ctx)1782 cso_restore_compute_state(struct cso_context *ctx)
1783 {
1784 struct cso_context_priv *cso = (struct cso_context_priv *)ctx;
1785 unsigned state_mask = cso->saved_compute_state;
1786
1787 assert(state_mask);
1788
1789 if (state_mask & CSO_BIT_COMPUTE_SHADER)
1790 cso_restore_compute_shader(cso);
1791
1792 if (state_mask & CSO_BIT_COMPUTE_SAMPLERS)
1793 cso_restore_compute_samplers(cso);
1794
1795 cso->saved_compute_state = 0;
1796 }
1797
1798
1799
1800 /* drawing */
1801
1802 void
cso_draw_arrays(struct cso_context * ctx,unsigned mode,unsigned start,unsigned count)1803 cso_draw_arrays(struct cso_context *ctx, unsigned mode, unsigned start, unsigned count)
1804 {
1805 struct pipe_draw_info info;
1806 struct pipe_draw_start_count_bias draw;
1807
1808 util_draw_init_info(&info);
1809
1810 info.mode = mode;
1811 info.index_bounds_valid = true;
1812 info.min_index = start;
1813 info.max_index = start + count - 1;
1814
1815 draw.start = start;
1816 draw.count = count;
1817 draw.index_bias = 0;
1818
1819 cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
1820 }
1821
1822
1823 void
cso_draw_arrays_instanced(struct cso_context * ctx,unsigned mode,unsigned start,unsigned count,unsigned start_instance,unsigned instance_count)1824 cso_draw_arrays_instanced(struct cso_context *ctx, unsigned mode,
1825 unsigned start, unsigned count,
1826 unsigned start_instance, unsigned instance_count)
1827 {
1828 struct pipe_draw_info info;
1829 struct pipe_draw_start_count_bias draw;
1830
1831 util_draw_init_info(&info);
1832
1833 info.mode = mode;
1834 info.index_bounds_valid = true;
1835 info.min_index = start;
1836 info.max_index = start + count - 1;
1837 info.start_instance = start_instance;
1838 info.instance_count = instance_count;
1839
1840 draw.start = start;
1841 draw.count = count;
1842 draw.index_bias = 0;
1843
1844 cso_draw_vbo(ctx, &info, 0, NULL, &draw, 1);
1845 }
1846