/**********************************************************
 * Copyright 2008-2012 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_bitmask.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "svga_context.h"
#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_shader.h"
#include "svga_resource_texture.h"


/**
 * This bit isn't really used anywhere.  It only serves to help
 * generate a unique "signature" for the vertex shader output bitmask.
 * Shader input/output signatures are used to resolve shader linking
 * issues.
 */
#define FOG_GENERIC_BIT (((uint64_t) 1) << 63)

/**
 * Use the shader info to generate a bitmask indicating which generic
 * inputs are used by the shader.  A set bit indicates that GENERIC[i]
 * is used.
 */
uint64_t
svga_get_generic_inputs_mask(const struct tgsi_shader_info *info)
{
   unsigned i;
   uint64_t mask = 0x0;

   for (i = 0; i < info->num_inputs; i++) {
      if (info->input_semantic_name[i] == TGSI_SEMANTIC_GENERIC) {
         unsigned j = info->input_semantic_index[i];
         assert(j < sizeof(mask) * 8);
         mask |= ((uint64_t) 1) << j;
      }
   }

   return mask;
}

/**
 * Scan the shader info to return a bitmask of written generic outputs.
 * A FOG output is folded into the mask as bit 63 (FOG_GENERIC_BIT).
 */
uint64_t
svga_get_generic_outputs_mask(const struct tgsi_shader_info *info)
{
   unsigned i;
   uint64_t mask = 0x0;

   for (i = 0; i < info->num_outputs; i++) {
      switch (info->output_semantic_name[i]) {
      case TGSI_SEMANTIC_GENERIC:
         {
            unsigned j = info->output_semantic_index[i];
            assert(j < sizeof(mask) * 8);
            mask |= ((uint64_t) 1) << j;
         }
         break;
      case TGSI_SEMANTIC_FOG:
         mask |= FOG_GENERIC_BIT;
         break;
      }
   }

   return mask;
}


/**
 * Given a mask of used generic variables (as returned by the above
 * functions) fill in a table which maps those indexes to small integers.
 * This table is used by the remap_generic_index() function in
 * svga_tgsi_decl_sm30.c
 * Example: if generics_mask = binary(1010) it means that GENERIC[1] and
 * GENERIC[3] are used.  Since texcoord[0] is reserved, remapping starts
 * at 1 and the remap_table will contain:
 *   table[1] = 1;
 *   table[3] = 2;
 * The remaining table entries are set to -1 and get filled in lazily
 * with the next unused value (here, 3) by svga_remap_generic_index().
 */
void
svga_remap_generics(uint64_t generics_mask,
                    int8_t remap_table[MAX_GENERIC_VARYING])
{
   /* Note texcoord[0] is reserved so start at 1 */
   unsigned count = 1, i;

   for (i = 0; i < MAX_GENERIC_VARYING; i++) {
      remap_table[i] = -1;
   }

   /* for each bit set in generics_mask */
   while (generics_mask) {
      unsigned index = ffsll(generics_mask) - 1;
      remap_table[index] = count++;
      generics_mask &= ~((uint64_t) 1 << index);
   }
}

/**
 * Use the generic remap table to map a TGSI generic varying variable
 * index to a small integer.  If the remapping table doesn't have a
 * valid value for the given index (the table entry is -1) it means
 * the fragment shader doesn't use that VS output.  Just allocate
 * the next free value in that case.  Alternately, we could cull
 * VS instructions that write to that register, or replace the register
 * with a dummy temp register.
 * XXX TODO: we should do one of the latter as it would save precious
 * texcoord registers.
 */
int
svga_remap_generic_index(int8_t remap_table[MAX_GENERIC_VARYING],
                         int generic_index)
{
   assert(generic_index < MAX_GENERIC_VARYING);

   if (generic_index >= MAX_GENERIC_VARYING) {
      /* just don't return a random/garbage value */
      generic_index = MAX_GENERIC_VARYING - 1;
   }

   if (remap_table[generic_index] == -1) {
      /* This is a VS output that has no matching PS input.  Find a
       * free index.
       */
      int i, max = 0;
      for (i = 0; i < MAX_GENERIC_VARYING; i++) {
         max = MAX2(max, remap_table[i]);
      }
      remap_table[generic_index] = max + 1;
   }

   return remap_table[generic_index];
}

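/*
 * Illustrative flow for the remap helpers above (a sketch of how a
 * caller might combine them; the actual callers live elsewhere in the
 * driver):
 *
 *    int8_t table[MAX_GENERIC_VARYING];
 *    uint64_t mask = svga_get_generic_outputs_mask(vs_info);
 *    svga_remap_generics(mask, table);
 *    ...
 *    int index = svga_remap_generic_index(table, generic_index);
 */

/*
 * Swizzle remap tables, indexed by a sampler view's per-channel swizzle
 * (PIPE_SWIZZLE_*).  copy_alpha passes the view's swizzle through
 * unchanged; the other tables additionally force a fixed pattern
 * (e.g. set_alpha maps a W/alpha selection to constant 1.0, set_XXXX
 * maps every component selection to X) for view formats that need it.
 */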
static const enum pipe_swizzle copy_alpha[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_Z,
   PIPE_SWIZZLE_W,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

static const enum pipe_swizzle set_alpha[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_Z,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

static const enum pipe_swizzle set_000X[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

static const enum pipe_swizzle set_XXXX[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

static const enum pipe_swizzle set_XXX1[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

static const enum pipe_swizzle set_XXXY[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};

/**
 * Initialize the shader-neutral fields of svga_compile_key from context
 * state.  This is basically the texture-related state.
 */
void
svga_init_shader_key_common(const struct svga_context *svga,
                            enum pipe_shader_type shader_type,
                            const struct svga_shader *shader,
                            struct svga_compile_key *key)
{
   unsigned i;
   unsigned idx = 0;  /* next width_height_idx for unnormalized-coords textures */

   assert(shader_type < ARRAY_SIZE(svga->curr.num_sampler_views));

   /* In case the number of samplers and sampler_views doesn't match,
    * loop over the larger of the two counts.
    */
   key->num_textures = MAX2(svga->curr.num_sampler_views[shader_type],
                            svga->curr.num_samplers[shader_type]);

   for (i = 0; i < key->num_textures; i++) {
      struct pipe_sampler_view *view = svga->curr.sampler_views[shader_type][i];
      const struct svga_sampler_state
         *sampler = svga->curr.sampler[shader_type][i];

      if (view) {
         assert(view->texture);
         assert(view->texture->target < (1 << 4)); /* texture_target:4 */

         /* 1D/2D array textures with one slice and cube map array textures
          * with one cube are treated as non-arrays by the SVGA3D device.
          * Set the is_array flag only if we know that we have more than 1
          * element.  This will be used to select shader instruction/resource
          * types during shader translation.
          */
         switch (view->texture->target) {
         case PIPE_TEXTURE_1D_ARRAY:
         case PIPE_TEXTURE_2D_ARRAY:
            key->tex[i].is_array = view->texture->array_size > 1;
            break;
         case PIPE_TEXTURE_CUBE_ARRAY:
            key->tex[i].is_array = view->texture->array_size > 6;
            break;
         default:
            break; /* nothing / silence compiler warning */
         }

         assert(view->texture->nr_samples < (1 << 5)); /* 5-bit field */
         key->tex[i].num_samples = view->texture->nr_samples;

         const enum pipe_swizzle *swizzle_tab;
         if (view->texture->target == PIPE_BUFFER) {
            SVGA3dSurfaceFormat svga_format;
            unsigned tf_flags;

            /* Apply any special swizzle mask for the view format if needed */
            svga_translate_texture_buffer_view_format(view->format,
                                                      &svga_format, &tf_flags);
            if (tf_flags & TF_000X)
               swizzle_tab = set_000X;
            else if (tf_flags & TF_XXXX)
               swizzle_tab = set_XXXX;
            else if (tf_flags & TF_XXX1)
               swizzle_tab = set_XXX1;
            else if (tf_flags & TF_XXXY)
               swizzle_tab = set_XXXY;
            else
               swizzle_tab = copy_alpha;
         }
         else {
            /* If we have a non-alpha view into an svga3d surface with an
             * alpha channel, then explicitly set the alpha channel to 1
             * when sampling.  Note that we need to check the
             * actual device format to cover also imported surface cases.
             */
            swizzle_tab =
               (!util_format_has_alpha(view->format) &&
                svga_texture_device_format_has_alpha(view->texture)) ?
               set_alpha : copy_alpha;

            if (view->texture->format == PIPE_FORMAT_DXT1_RGB ||
                view->texture->format == PIPE_FORMAT_DXT1_SRGB)
               swizzle_tab = set_alpha;

            /* Save the compare function as we need to handle
             * depth compare in the shader.
             */
            key->tex[i].compare_mode = sampler->compare_mode;
            key->tex[i].compare_func = sampler->compare_func;
         }

         key->tex[i].swizzle_r = swizzle_tab[view->swizzle_r];
         key->tex[i].swizzle_g = swizzle_tab[view->swizzle_g];
         key->tex[i].swizzle_b = swizzle_tab[view->swizzle_b];
         key->tex[i].swizzle_a = swizzle_tab[view->swizzle_a];
      }

      if (sampler) {
         if (!sampler->normalized_coords) {
            if (view) {
               assert(idx < (1 << 5)); /* width_height_idx:5 bitfield */
               key->tex[i].width_height_idx = idx++;
            }
            key->tex[i].unnormalized = TRUE;
            ++key->num_unnormalized_coords;

            if (sampler->magfilter == SVGA3D_TEX_FILTER_NEAREST ||
                sampler->minfilter == SVGA3D_TEX_FILTER_NEAREST) {
               key->tex[i].texel_bias = TRUE;
            }
         }
      }
   }

   key->clamp_vertex_color = svga->curr.rast ?
      svga->curr.rast->templ.clamp_vertex_color : 0;
}

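/*
 * Typical variant lookup flow (an illustrative sketch; the real callers
 * are the per-stage state emit functions):
 *
 *    struct svga_compile_key key;
 *    memset(&key, 0, sizeof(key));
 *    svga_init_shader_key_common(svga, PIPE_SHADER_FRAGMENT, shader, &key);
 *    ... set any stage-specific key fields ...
 *    variant = svga_search_shader_key(shader, &key);
 *    if (!variant) {
 *       ... compile a new variant, then svga_define_shader(svga, variant) ...
 *    }
 */
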
/** Search for a compiled shader variant with the same compile key */
struct svga_shader_variant *
svga_search_shader_key(const struct svga_shader *shader,
                       const struct svga_compile_key *key)
{
   struct svga_shader_variant *variant = shader->variants;

   assert(key);

   for ( ; variant; variant = variant->next) {
      if (svga_compile_keys_equal(key, &variant->key))
         return variant;
   }
   return NULL;
}

/** Search for a shader with the same token key */
struct svga_shader *
svga_search_shader_token_key(struct svga_shader *pshader,
                             const struct svga_token_key *key)
{
   struct svga_shader *shader = pshader;

   assert(key);

   for ( ; shader; shader = shader->next) {
      if (memcmp(key, &shader->token_key, sizeof(struct svga_token_key)) == 0)
         return shader;
   }
   return NULL;
}

/**
 * Helper function to define a gb shader for non-vgpu10 device
 */
static enum pipe_error
define_gb_shader_vgpu9(struct svga_context *svga,
                       struct svga_shader_variant *variant,
                       unsigned codeLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;

   /**
    * Create gb memory for the shader and upload the shader code.
    * Kernel module will allocate an id for the shader and issue
    * the DefineGBShader command.
    */
   variant->gb_shader = sws->shader_create(sws, variant->type,
                                           variant->tokens, codeLen);

   svga->hud.shader_mem_used += codeLen;

   if (!variant->gb_shader)
      return PIPE_ERROR_OUT_OF_MEMORY;

   ret = SVGA3D_BindGBShader(svga->swc, variant->gb_shader);

   return ret;
}

/**
 * Helper function to define a gb shader for vgpu10 device
 */
static enum pipe_error
define_gb_shader_vgpu10(struct svga_context *svga,
                        struct svga_shader_variant *variant,
                        unsigned codeLen)
{
   struct svga_winsys_context *swc = svga->swc;
   enum pipe_error ret;
   unsigned len = codeLen + variant->signatureLen;

   /**
    * Shaders in VGPU10 enabled device reside in the device COTable.
    * SVGA driver will allocate an integer ID for the shader and
    * issue DXDefineShader and DXBindShader commands.
    */
   variant->id = util_bitmask_add(svga->shader_id_bm);
   if (variant->id == UTIL_BITMASK_INVALID_INDEX) {
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   /* Create gb memory for the shader and upload the shader code */
   variant->gb_shader = swc->shader_create(swc,
                                           variant->id, variant->type,
                                           variant->tokens, codeLen,
                                           variant->signature,
                                           variant->signatureLen);

   svga->hud.shader_mem_used += len;

   if (!variant->gb_shader) {
      /* Free the shader ID */
      assert(variant->id != UTIL_BITMASK_INVALID_INDEX);
      goto fail_no_allocation;
   }

   /**
    * Since we don't want to do any flush within state emission to avoid
    * partial state in a command buffer, it's important to make sure that
    * there is enough room to send both the DXDefineShader & DXBindShader
    * commands in the same command buffer.  So let's send both
    * commands in one command reservation.  If it fails, we'll undo
    * the shader creation and return an error.
    */
   ret = SVGA3D_vgpu10_DefineAndBindShader(swc, variant->gb_shader,
                                           variant->id, variant->type,
                                           len);

   if (ret != PIPE_OK)
      goto fail;

   return PIPE_OK;

fail:
   swc->shader_destroy(swc, variant->gb_shader);
   variant->gb_shader = NULL;

fail_no_allocation:
   util_bitmask_clear(svga->shader_id_bm, variant->id);
   variant->id = UTIL_BITMASK_INVALID_INDEX;

   return PIPE_ERROR_OUT_OF_MEMORY;
}

/**
 * Issue the SVGA3D commands to define a new shader.
 * \param variant  contains the shader tokens, etc.  The variant->id field
 *                 will be set here.
 */
enum pipe_error
svga_define_shader(struct svga_context *svga,
                   struct svga_shader_variant *variant)
{
   unsigned codeLen = variant->nr_tokens * sizeof(variant->tokens[0]);
   enum pipe_error ret;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DEFINESHADER);

   variant->id = UTIL_BITMASK_INVALID_INDEX;

   if (svga_have_gb_objects(svga)) {
      if (svga_have_vgpu10(svga))
         ret = define_gb_shader_vgpu10(svga, variant, codeLen);
      else
         ret = define_gb_shader_vgpu9(svga, variant, codeLen);
   }
   else {
      /* Allocate an integer ID for the shader */
      variant->id = util_bitmask_add(svga->shader_id_bm);
      if (variant->id == UTIL_BITMASK_INVALID_INDEX) {
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto done;
      }

      /* Issue SVGA3D device command to define the shader */
      ret = SVGA3D_DefineShader(svga->swc,
                                variant->id,
                                variant->type,
                                variant->tokens,
                                codeLen);
      if (ret != PIPE_OK) {
         /* free the ID */
         assert(variant->id != UTIL_BITMASK_INVALID_INDEX);
         util_bitmask_clear(svga->shader_id_bm, variant->id);
         variant->id = UTIL_BITMASK_INVALID_INDEX;
      }
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}


/**
 * Issue the SVGA3D commands to set/bind a shader.
 * \param type  the shader stage to bind.
 * \param variant  the shader variant to bind, or NULL to unbind.
 */
enum pipe_error
svga_set_shader(struct svga_context *svga,
                SVGA3dShaderType type,
                struct svga_shader_variant *variant)
{
   enum pipe_error ret;
   unsigned id = variant ? variant->id : SVGA3D_INVALID_ID;

   assert(type == SVGA3D_SHADERTYPE_VS ||
          type == SVGA3D_SHADERTYPE_GS ||
          type == SVGA3D_SHADERTYPE_PS ||
          type == SVGA3D_SHADERTYPE_HS ||
          type == SVGA3D_SHADERTYPE_DS ||
          type == SVGA3D_SHADERTYPE_CS);

   if (svga_have_gb_objects(svga)) {
      struct svga_winsys_gb_shader *gbshader =
         variant ? variant->gb_shader : NULL;

      if (svga_have_vgpu10(svga))
         ret = SVGA3D_vgpu10_SetShader(svga->swc, type, gbshader, id);
      else
         ret = SVGA3D_SetGBShader(svga->swc, type, gbshader);
   }
   else {
      ret = SVGA3D_SetShader(svga->swc, type, id);
   }

   return ret;
}

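/**
 * Allocate a new shader variant of the size appropriate for the given
 * shader stage and set its SVGA3D shader type.  Returns NULL for an
 * unsupported stage or on allocation failure.
 */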
struct svga_shader_variant *
svga_new_shader_variant(struct svga_context *svga, enum pipe_shader_type type)
{
   struct svga_shader_variant *variant;

   switch (type) {
   case PIPE_SHADER_FRAGMENT:
      variant = CALLOC(1, sizeof(struct svga_fs_variant));
      break;
   case PIPE_SHADER_GEOMETRY:
      variant = CALLOC(1, sizeof(struct svga_gs_variant));
      break;
   case PIPE_SHADER_VERTEX:
      variant = CALLOC(1, sizeof(struct svga_vs_variant));
      break;
   case PIPE_SHADER_TESS_EVAL:
      variant = CALLOC(1, sizeof(struct svga_tes_variant));
      break;
   case PIPE_SHADER_TESS_CTRL:
      variant = CALLOC(1, sizeof(struct svga_tcs_variant));
      break;
   default:
      return NULL;
   }

   if (variant) {
      variant->type = svga_shader_type(type);
      svga->hud.num_shaders++;
   }
   return variant;
}

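/**
 * Destroy a shader variant: issue the device command to destroy the
 * shader, release its shader ID and any guest-backed shader object,
 * and free the host-side memory.
 */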
void
svga_destroy_shader_variant(struct svga_context *svga,
                            struct svga_shader_variant *variant)
{
   if (svga_have_gb_objects(svga) && variant->gb_shader) {
      if (svga_have_vgpu10(svga)) {
         struct svga_winsys_context *swc = svga->swc;
         swc->shader_destroy(swc, variant->gb_shader);
         SVGA_RETRY(svga, SVGA3D_vgpu10_DestroyShader(svga->swc, variant->id));
         util_bitmask_clear(svga->shader_id_bm, variant->id);
      }
      else {
         struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
         sws->shader_destroy(sws, variant->gb_shader);
      }
      variant->gb_shader = NULL;
   }
   else {
      if (variant->id != UTIL_BITMASK_INVALID_INDEX) {
         SVGA_RETRY(svga, SVGA3D_DestroyShader(svga->swc, variant->id,
                                               variant->type));
         util_bitmask_clear(svga->shader_id_bm, variant->id);
      }
   }

   FREE(variant->signature);
   FREE((unsigned *)variant->tokens);
   FREE(variant);

   svga->hud.num_shaders--;
}

/**
 * Rebind shaders.
 * Called at the beginning of every new command buffer to ensure that
 * shaders are properly paged in.  Instead of sending the SetShader
 * command, this function sends a private allocation command to
 * page in a shader.  This avoids emitting redundant state to the device
 * just to page in a resource.
 */
enum pipe_error
svga_rebind_shaders(struct svga_context *svga)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_hw_draw_state *hw = &svga->state.hw_draw;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /**
    * If the underlying winsys layer does not need resource rebinding,
    * just clear the rebind flags and return.
    */
   if (swc->resource_rebind == NULL) {
      svga->rebind.flags.vs = 0;
      svga->rebind.flags.gs = 0;
      svga->rebind.flags.fs = 0;
      svga->rebind.flags.tcs = 0;
      svga->rebind.flags.tes = 0;

      return PIPE_OK;
   }

   if (svga->rebind.flags.vs && hw->vs && hw->vs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->vs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.vs = 0;

   if (svga->rebind.flags.gs && hw->gs && hw->gs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->gs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.gs = 0;

   if (svga->rebind.flags.fs && hw->fs && hw->fs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->fs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.fs = 0;

   if (svga->rebind.flags.tcs && hw->tcs && hw->tcs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->tcs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.tcs = 0;

   if (svga->rebind.flags.tes && hw->tes && hw->tes->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->tes->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.tes = 0;

   return PIPE_OK;
}