/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "compiler/v3d_compiler.h"
#include "vc5_context.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

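/* Maps a driver_location in the shader's output variables back to the
 * gl_varying_slot it was assigned, so the transform feedback setup below can
 * refer to slots rather than driver locations.  Returns -1 if no output
 * matches.
 */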
static gl_varying_slot
vc5_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
{
        nir_foreach_variable(var, &s->outputs) {
                if (var->data.driver_location == driver_location) {
                        return var->data.location;
                }
        }

        return -1;
}

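/* Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA packet contents and the
 * list of varying slots the coordinate shader must write for transform
 * feedback, based on the gallium stream output info supplied at shader
 * creation time.
 */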
static void
vc5_set_transform_feedback_outputs(struct vc5_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        vc5_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                        /* We need the offset from the coordinate shader's VPM
                         * output block, which has the [X, Y, Z, W, Xs, Ys]
                         * values at the start. Note that this will need some
                         * shifting when PSIZ is also present.
                         */
                        .first_shaded_vertex_value_to_output = vpm_start + 6,
                        .number_of_consecutive_vertex_values_to_output_as_32_bit_values_minus_1 = vpm_size - 1,
                        .output_buffer_to_write_to = buffer,
                };
                V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                               (void *)&so->tf_specs[so->num_tf_specs++],
                                                               &unpacked);
        }

        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}

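/* Counts the attribute slots occupied by a variable of the given GLSL type,
 * used as the type-size callback for nir_lower_io below.
 */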
static int
type_size(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

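/* Gallium CSO create hook for both vertex and fragment shaders.  Takes the
 * shader as NIR (or translates TGSI to NIR), runs the lowering and
 * optimization passes that don't depend on the shader key, and records the
 * transform feedback outputs.  Compilation to QPU code is deferred to draw
 * time, when the state-dependent key is known.
 */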
static void *
vc5_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_uncompiled_shader *so = CALLOC_STRUCT(vc5_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = vc5->next_uncompiled_program_id++;

        nir_shader *s;

        if (cso->type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = cso->ir.nir;

                NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
                           (nir_lower_io_options)0);
        } else {
                assert(cso->type == PIPE_SHADER_IR_TGSI);

                if (V3D_DEBUG & V3D_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(cso->tokens, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(cso->tokens, &v3d_nir_options);
        }

        NIR_PASS_V(s, nir_opt_global_to_local);
        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        vc5_set_transform_feedback_outputs(so, &cso->stream_output);

        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(s->info.stage))) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}

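/* Looks up (or compiles and caches) the shader variant for the given key.
 * The per-stage caches are keyed on the full v3d_fs_key/v3d_vs_key contents,
 * so any state change that lands in the key produces a separate variant.
 */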
static struct vc5_compiled_shader *
vc5_get_compiled_shader(struct vc5_context *vc5, struct v3d_key *key)
{
        struct vc5_uncompiled_shader *shader_state = key->shader_state;
        nir_shader *s = shader_state->base.ir.nir;

        struct hash_table *ht;
        uint32_t key_size;
        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                ht = vc5->fs_cache;
                key_size = sizeof(struct v3d_fs_key);
        } else {
                ht = vc5->vs_cache;
                key_size = sizeof(struct v3d_vs_key);
        }

        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc5_compiled_shader *shader =
                rzalloc(NULL, struct vc5_compiled_shader);

        int program_id = shader_state->program_id;
        int variant_id =
                p_atomic_inc_return(&shader_state->compiled_variant_count);
        uint64_t *qpu_insts;
        uint32_t shader_size;

        switch (s->info.stage) {
        case MESA_SHADER_VERTEX:
                shader->prog_data.vs = rzalloc(shader, struct v3d_vs_prog_data);

                qpu_insts = v3d_compile_vs(vc5->screen->compiler,
                                           (struct v3d_vs_key *)key,
                                           shader->prog_data.vs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        case MESA_SHADER_FRAGMENT:
                shader->prog_data.fs = rzalloc(shader, struct v3d_fs_prog_data);

                qpu_insts = v3d_compile_fs(vc5->screen->compiler,
                                           (struct v3d_fs_key *)key,
                                           shader->prog_data.fs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        default:
                unreachable("bad stage");
        }

        vc5_set_shader_uniform_dirty_flags(shader);

        shader->bo = vc5_bo_alloc(vc5->screen, shader_size, "shader");
        vc5_bo_map(shader->bo);
        memcpy(shader->bo->map, qpu_insts, shader_size);

        free(qpu_insts);

        struct vc5_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

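/* Fills in the parts of the shader key that are shared between the vertex
 * and fragment stages: per-texture return size and channel count, swizzles,
 * shadow compare and wrap-clamp state, plus the enabled user clip planes.
 */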
static void
vc5_setup_shared_key(struct vc5_context *vc5, struct v3d_key *key,
                     struct vc5_texture_stateobj *texstate)
{
        const struct v3d_device_info *devinfo = &vc5->screen->devinfo;

        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct vc5_sampler_view *vc5_sampler = vc5_sampler_view(sampler);
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].return_size =
                        vc5_get_tex_return_size(devinfo,
                                                sampler->format,
                                                sampler_state->compare_mode);

                /* For 16-bit returns, we set up the sampler to always return
                 * 2 channels (meaning no recompiles for most state changes),
                 * while for 32-bit returns we scale the number of returned
                 * channels to the format.
                 */
                if (key->tex[i].return_size == 16) {
                        key->tex[i].return_channels = 2;
                } else if (devinfo->ver > 40) {
                        key->tex[i].return_channels = 4;
                } else {
                        key->tex[i].return_channels =
                                vc5_get_tex_return_channels(devinfo,
                                                            sampler->format);
                }

                if (key->tex[i].return_size == 32 && devinfo->ver < 40) {
                        memcpy(key->tex[i].swizzle,
                               vc5_sampler->swizzle,
                               sizeof(vc5_sampler->swizzle));
                } else {
                        /* For 16-bit returns, we let the sampler state handle
                         * the swizzle.
                         */
                        key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
                        key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
                        key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
                        key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
                }

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].clamp_s =
                                sampler_state->wrap_s == PIPE_TEX_WRAP_CLAMP;
                        key->tex[i].clamp_t =
                                sampler_state->wrap_t == PIPE_TEX_WRAP_CLAMP;
                        key->tex[i].clamp_r =
                                sampler_state->wrap_r == PIPE_TEX_WRAP_CLAMP;
                }
        }

        key->ucp_enables = vc5->rasterizer->base.clip_plane_enable;
}

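/* Builds the fragment shader key from the current state and fetches (or
 * compiles) the matching variant, raising the dirty flags that depend on the
 * compiled FS when the variant changes.
 */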
static void
vc5_update_compiled_fs(struct vc5_context *vc5, uint8_t prim_mode)
{
        struct vc5_job *job = vc5->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;

        if (!(vc5->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_BLEND |
                            VC5_DIRTY_FRAMEBUFFER |
                            VC5_DIRTY_ZSA |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_SAMPLE_MASK |
                            VC5_DIRTY_FRAGTEX |
                            VC5_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc5_setup_shared_key(vc5, &key->base, &vc5->fragtex);
        key->base.shader_state = vc5->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->clamp_color = vc5->rasterizer->base.clamp_fragment_color;
        if (vc5->blend->logicop_enable) {
                key->logicop_func = vc5->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = vc5->rasterizer->base.multisample;
                key->sample_coverage = (vc5->rasterizer->base.multisample &&
                                        vc5->sample_mask != (1 << VC5_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc5->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc5->blend->alpha_to_one;
        }

        key->depth_enabled = (vc5->zsa->base.depth.enabled ||
                              vc5->zsa->base.stencil[0].enabled);
        if (vc5->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc5->zsa->base.alpha.func;
        }

        /* gl_FragColor's propagation to however many bound color buffers
         * there are means that the buffer count needs to be in the key.
         */
        key->nr_cbufs = vc5->framebuffer.nr_cbufs;
        key->swap_color_rb = vc5->swap_color_rb;

        for (int i = 0; i < key->nr_cbufs; i++) {
                struct pipe_surface *cbuf = vc5->framebuffer.cbufs[i];
                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc5->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc5->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc5->rasterizer->base.light_twoside;
        key->shade_model_flat = vc5->rasterizer->base.flatshade;

        struct vc5_compiled_shader *old_fs = vc5->prog.fs;
        vc5->prog.fs = vc5_get_compiled_shader(vc5, &key->base);
        if (vc5->prog.fs == old_fs)
                return;

        vc5->dirty |= VC5_DIRTY_COMPILED_FS;

        if (old_fs &&
            vc5->prog.fs->prog_data.fs->flat_shade_flags !=
            old_fs->prog_data.fs->flat_shade_flags) {
                vc5->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
        }

        if (old_fs && memcmp(vc5->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(vc5->prog.fs->prog_data.fs->input_slots))) {
                vc5->dirty |= VC5_DIRTY_FS_INPUTS;
        }
}

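/* Builds the vertex shader key and fetches the compiled vertex shader, then
 * reuses the same key with is_coord set to get the coordinate shader used
 * for binning, which only outputs position and the transform feedback
 * varyings.  Runs after vc5_update_compiled_fs() because the VS key depends
 * on the FS input slots.
 */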
static void
vc5_update_compiled_vs(struct vc5_context *vc5, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        if (!(vc5->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_VERTTEX |
                            VC5_DIRTY_VTXSTATE |
                            VC5_DIRTY_UNCOMPILED_VS |
                            VC5_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc5_setup_shared_key(vc5, &key->base, &vc5->verttex);
        key->base.shader_state = vc5->prog.bind_vs;
        key->num_fs_inputs = vc5->prog.fs->prog_data.fs->base.num_inputs;
        STATIC_ASSERT(sizeof(key->fs_inputs) ==
                      sizeof(vc5->prog.fs->prog_data.fs->input_slots));
        memcpy(key->fs_inputs, vc5->prog.fs->prog_data.fs->input_slots,
               sizeof(key->fs_inputs));
        key->clamp_color = vc5->rasterizer->base.clamp_vertex_color;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc5->rasterizer->base.point_size_per_vertex);

        struct vc5_compiled_shader *vs =
                vc5_get_compiled_shader(vc5, &key->base);
        if (vs != vc5->prog.vs) {
                vc5->prog.vs = vs;
                vc5->dirty |= VC5_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders only output varyings used by transform feedback. */
        struct vc5_uncompiled_shader *shader_state = key->base.shader_state;
        memcpy(key->fs_inputs, shader_state->tf_outputs,
               sizeof(*key->fs_inputs) * shader_state->num_tf_outputs);
        if (shader_state->num_tf_outputs < key->num_fs_inputs) {
                memset(&key->fs_inputs[shader_state->num_tf_outputs],
                       0,
                       sizeof(*key->fs_inputs) * (key->num_fs_inputs -
                                                  shader_state->num_tf_outputs));
        }
        key->num_fs_inputs = shader_state->num_tf_outputs;

        struct vc5_compiled_shader *cs =
                vc5_get_compiled_shader(vc5, &key->base);
        if (cs != vc5->prog.cs) {
                vc5->prog.cs = cs;
                vc5->dirty |= VC5_DIRTY_COMPILED_CS;
        }
}

void
vc5_update_compiled_shaders(struct vc5_context *vc5, uint8_t prim_mode)
{
        vc5_update_compiled_fs(vc5, prim_mode);
        vc5_update_compiled_vs(vc5, prim_mode);
}

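/* Hash and comparison callbacks for the per-stage variant caches.  The keys
 * are fixed-size structs that are zeroed before being filled in, so hashing
 * and comparing the raw bytes is valid.
 */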
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct v3d_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct v3d_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct v3d_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct v3d_vs_key)) == 0;
}

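/* Removes a cached variant from the given cache if it was compiled from the
 * uncompiled shader being deleted, dropping its BO reference and clearing
 * the context's last-compiled pointer if it pointed at this variant.
 */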
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct vc5_compiled_shader **last_compile,
                             struct hash_entry *entry,
                             struct vc5_uncompiled_shader *so)
{
        const struct v3d_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc5_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc5_bo_unreference(&shader->bo);

                if (shader == *last_compile)
                        *last_compile = NULL;

                ralloc_free(shader);
        }
}

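/* Gallium CSO delete hook: evicts every cached variant compiled from this
 * shader before freeing the NIR and the uncompiled shader state itself.
 */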
static void
vc5_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc5->fs_cache, entry) {
                delete_from_cache_if_matches(vc5->fs_cache, &vc5->prog.fs,
                                             entry, so);
        }
        hash_table_foreach(vc5->vs_cache, entry) {
                delete_from_cache_if_matches(vc5->vs_cache, &vc5->prog.vs,
                                             entry, so);
        }

        ralloc_free(so->base.ir.nir);
        free(so);
}

static void
vc5_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        vc5->prog.bind_fs = hwcso;
        vc5->dirty |= VC5_DIRTY_UNCOMPILED_FS;
}

static void
vc5_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        vc5->prog.bind_vs = hwcso;
        vc5->dirty |= VC5_DIRTY_UNCOMPILED_VS;
}

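/* Hooks the shader CSO entrypoints into the pipe_context and creates the
 * per-stage variant caches.
 */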
void
vc5_program_init(struct pipe_context *pctx)
{
        struct vc5_context *vc5 = vc5_context(pctx);

        pctx->create_vs_state = vc5_shader_state_create;
        pctx->delete_vs_state = vc5_shader_state_delete;

        pctx->create_fs_state = vc5_shader_state_create;
        pctx->delete_fs_state = vc5_shader_state_delete;

        pctx->bind_fs_state = vc5_fp_state_bind;
        pctx->bind_vs_state = vc5_vp_state_bind;

        vc5->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc5->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

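/* Frees every remaining cached shader variant at context teardown.  The hash
 * tables themselves are ralloc'd against the pipe_context and go away with
 * it.
 */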
void
vc5_program_fini(struct pipe_context *pctx)
{
        struct vc5_context *vc5 = vc5_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc5->fs_cache, entry) {
                struct vc5_compiled_shader *shader = entry->data;
                vc5_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc5->fs_cache, entry);
        }

        hash_table_foreach(vc5->vs_cache, entry) {
                struct vc5_compiled_shader *shader = entry->data;
                vc5_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc5->vs_cache, entry);
        }
}