/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_dump.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "nir/tgsi_to_nir.h"
#include "compiler/v3d_compiler.h"
#include "v3d_context.h"
/* packets here are the same across V3D versions. */
#include "broadcom/cle/v3d_packet_v42_pack.h"

static struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d,
                        struct v3d_key *key, size_t key_size,
                        struct v3d_uncompiled_shader *uncompiled);

static void
v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
                                struct v3d_key *key);

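/* Maps a NIR driver_location back to its gl_varying_slot by walking the
 * shader's output variables.  Returns -1 if no output uses that location.
 */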
static gl_varying_slot
v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
{
        nir_foreach_shader_out_variable(var, s) {
                if (var->data.driver_location == driver_location) {
                        return var->data.location;
                }

                /* For compact arrays, we have more than one location to
                 * check.
                 */
                if (var->data.compact) {
                        assert(glsl_type_is_array(var->type));
                        for (int i = 0; i < DIV_ROUND_UP(glsl_array_size(var->type), 4); i++) {
                                if ((var->data.driver_location + i) == driver_location) {
                                        return var->data.location;
                                }
                        }
                }
        }

        return -1;
}

/**
 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
 *
 * A shader can have 16 of these specs, and each one of them can write up to
 * 16 dwords.  Since we allow a total of 64 transform feedback output
 * components (not 16 vectors), we have to group the writes of multiple
 * varyings together in a single data spec.
 */
static void
v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                uint32_t vpm_start_offset = vpm_start + 6;

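                /* Each data spec can output at most 16 consecutive values,
                 * so a larger run of varyings is split across multiple specs.
                 */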
                while (vpm_size) {
                        uint32_t write_size = MIN2(vpm_size, 1 << 4);

                        struct V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                                /* We need the offset from the coordinate shader's VPM
                                 * output block, which has the [X, Y, Z, W, Xs, Ys]
                                 * values at the start.
                                 */
                                .first_shaded_vertex_value_to_output = vpm_start_offset,
                                .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
                                .output_buffer_to_write_to = buffer,
                        };

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
                        V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs[so->num_tf_specs],
                                                                       &unpacked);

                        /* If point size is being written by the shader, then
                         * all the VPM start offsets are shifted up by one.
                         * We won't know that until the variant is compiled,
                         * though.
                         */
                        unpacked.first_shaded_vertex_value_to_output++;

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs_psiz[so->num_tf_specs],
                                                                       &unpacked);
                        so->num_tf_specs++;
                        vpm_start_offset += write_size;
                        vpm_size -= write_size;
                }
                so->base.stream_output.stride[buffer] =
                        stream_output->stride[buffer];
        }

        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}

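/* nir_lower_io size callback: I/O is measured in whole attribute slots. */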
static int
type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

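/* Fills the precompile key's used_outputs with every component of every
 * output variable, since the outputs actually consumed by the next stage
 * aren't known at precompile time.
 */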
static void
precompile_all_outputs(nir_shader *s,
                       struct v3d_varying_slot *outputs,
                       uint8_t *num_outputs)
{
        nir_foreach_shader_out_variable(var, s) {
                const int array_len = MAX2(glsl_get_length(var->type), 1);
                for (int j = 0; j < array_len; j++) {
                        const int slot = var->data.location + j;
                        const int num_components =
                                glsl_get_components(var->type);
                        for (int i = 0; i < num_components; i++) {
                                const int swiz = var->data.location_frac + i;
                                outputs[(*num_outputs)++] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         swiz);
                        }
                }
        }
}

/**
 * Precompiles a shader variant at shader state creation time if
 * V3D_DEBUG=precompile is set.  Used for shader-db
 * (https://gitlab.freedesktop.org/mesa/shader-db)
 */
static void
v3d_shader_precompile(struct v3d_context *v3d,
                      struct v3d_uncompiled_shader *so)
{
        nir_shader *s = so->base.ir.nir;

        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                struct v3d_fs_key key = {
                };

                nir_foreach_shader_out_variable(var, s) {
                        if (var->data.location == FRAG_RESULT_COLOR) {
                                key.cbufs |= 1 << 0;
                        } else if (var->data.location >= FRAG_RESULT_DATA0) {
                                key.cbufs |= 1 << (var->data.location -
                                                   FRAG_RESULT_DATA0);
                        }
                }

                key.logicop_func = PIPE_LOGICOP_COPY;

                v3d_setup_shared_precompile_key(so, &key.base);
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
                struct v3d_gs_key key = {
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);

                /* Compile GS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        } else {
                assert(s->info.stage == MESA_SHADER_VERTEX);
                struct v3d_vs_key key = {
                        /* Emit fixed function outputs */
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);

                /* Compile VS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
        }
}

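/* Rewrites nir_intrinsic_load_uniform so that both its constant base and its
 * indirect offset are expressed in bytes instead of vec4 units (a factor of
 * 16).
 */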
static bool
lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_intrinsic_instr *intr,
                                 void *_state)
{
        if (intr->intrinsic != nir_intrinsic_load_uniform)
                return false;

        b->cursor = nir_before_instr(&intr->instr);
        nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
        nir_src_rewrite(&intr->src[0], nir_ishl_imm(b, intr->src[0].ssa, 4));
        return true;
}

static bool
lower_textures_cb(nir_builder *b, nir_instr *instr, void *_state)
{
        if (instr->type != nir_instr_type_tex)
                return false;

        nir_tex_instr *tex = nir_instr_as_tex(instr);
        if (nir_tex_instr_need_sampler(tex))
                return false;

        /* Use the texture index as sampler index for the purposes of
         * lower_tex_packing, since in GL we currently make packing
         * decisions based on texture format.
         */
        tex->backend_flags = tex->texture_index;
        return true;
}

static bool
v3d_nir_lower_uniform_offset_to_bytes(nir_shader *s)
{
        return nir_shader_intrinsics_pass(s, lower_uniform_offset_to_bytes_cb,
                                            nir_metadata_block_index |
                                            nir_metadata_dominance, NULL);
}

static bool
v3d_nir_lower_textures(nir_shader *s)
{
        return nir_shader_instructions_pass(s, lower_textures_cb,
                                            nir_metadata_block_index |
                                            nir_metadata_dominance, NULL);
}

static void *
v3d_uncompiled_shader_create(struct pipe_context *pctx,
                             enum pipe_shader_ir type, void *ir)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = v3d->next_uncompiled_program_id++;

        nir_shader *s;

        if (type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = ir;
        } else {
                assert(type == PIPE_SHADER_IR_TGSI);

                if (V3D_DBG(TGSI)) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(ir, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(ir, pctx->screen, false);
        }

        if (s->info.stage != MESA_SHADER_VERTEX &&
            s->info.stage != MESA_SHADER_GEOMETRY) {
                NIR_PASS(_, s, nir_lower_io,
                         nir_var_shader_in | nir_var_shader_out,
                         type_size, (nir_lower_io_options)0);
        }

        NIR_PASS(_, s, nir_normalize_cubemap_coords);

        NIR_PASS(_, s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(NULL, s);

        NIR_PASS(_, s, nir_lower_var_copies);

        /* Get rid of split copies */
        v3d_optimize_nir(NULL, s);

        NIR_PASS(_, s, nir_remove_dead_variables, nir_var_function_temp, NULL);

        NIR_PASS(_, s, nir_lower_frexp);

        /* Since we can't expose PIPE_CAP_PACKED_UNIFORMS, the state tracker
         * produces uniform intrinsics with offsets in vec4 units, but our
         * compiler expects to work in units of bytes.
         */
        NIR_PASS(_, s, v3d_nir_lower_uniform_offset_to_bytes);

        NIR_PASS(_, s, v3d_nir_lower_textures);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        /* Generate sha1 from NIR for caching */
        struct blob blob;
        blob_init(&blob);
        nir_serialize(&blob, s, true);
        assert(!blob.out_of_memory);
        _mesa_sha1_compute(blob.data, blob.size, so->sha1);
        blob_finish(&blob);

        if (V3D_DBG(NIR) || v3d_debug_flag_for_shader_stage(s->info.stage)) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        if (V3D_DBG(PRECOMPILE))
                v3d_shader_precompile(v3d, so);

        return so;
}

static void
v3d_shader_debug_output(const char *message, void *data)
{
        struct pipe_context *ctx = data;

        util_debug_message(&ctx->debug, SHADER_INFO, "%s", message);
}

static void *
v3d_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct v3d_uncompiled_shader *so =
                v3d_uncompiled_shader_create(pctx,
                                             cso->type,
                                             (cso->type == PIPE_SHADER_IR_TGSI ?
                                              (void *)cso->tokens :
                                              cso->ir.nir));

        v3d_set_transform_feedback_outputs(so, &cso->stream_output);

        return so;
}

/* Key used with the RAM cache */
struct v3d_cache_key {
        struct v3d_key *key;
        unsigned char sha1[20];
};

struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d,
                        struct v3d_key *key,
                        size_t key_size,
                        struct v3d_uncompiled_shader *uncompiled)
{
        nir_shader *s = uncompiled->base.ir.nir;
        struct hash_table *ht = v3d->prog.cache[s->info.stage];
        struct v3d_cache_key cache_key;
        cache_key.key = key;
        memcpy(cache_key.sha1, uncompiled->sha1, sizeof(cache_key.sha1));
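        /* Check the in-memory variant cache first; fall back to the disk
         * cache and finally to a fresh compile below.
         */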
        struct hash_entry *entry = _mesa_hash_table_search(ht, &cache_key);
        if (entry)
                return entry->data;

        int variant_id =
                p_atomic_inc_return(&uncompiled->compiled_variant_count);

        struct v3d_compiled_shader *shader = NULL;

#ifdef ENABLE_SHADER_CACHE
        shader = v3d_disk_cache_retrieve(v3d, key, uncompiled);
#endif
        if (!shader) {
                shader = rzalloc(NULL, struct v3d_compiled_shader);

                int program_id = uncompiled->program_id;
                uint64_t *qpu_insts;
                uint32_t shader_size;

                qpu_insts = v3d_compile(v3d->screen->compiler, key,
                                        &shader->prog_data.base, s,
                                        v3d_shader_debug_output,
                                        v3d,
                                        program_id, variant_id, &shader_size);

                /* qpu_insts being NULL can happen if register allocation
                 * failed. At this point we can't really trigger an OpenGL API
                 * error, as the final compilation could happen at draw time,
                 * so at least assert so that debug builds stop here.
                 */
                assert(qpu_insts);
                ralloc_steal(shader, shader->prog_data.base);

                if (shader_size) {
                        u_upload_data(v3d->state_uploader, 0, shader_size, 8,
                                      qpu_insts, &shader->offset, &shader->resource);
                }

#ifdef ENABLE_SHADER_CACHE
                v3d_disk_cache_store(v3d, key, uncompiled,
                                     shader, qpu_insts, shader_size);
#endif

                free(qpu_insts);
        }

        v3d_set_shader_uniform_dirty_flags(shader);

        if (ht) {
                struct v3d_cache_key *dup_cache_key =
                        ralloc_size(shader, sizeof(struct v3d_cache_key));
                dup_cache_key->key = ralloc_memdup(shader, cache_key.key,
                                                   key_size);
                memcpy(dup_cache_key->sha1, cache_key.sha1, sizeof(dup_cache_key->sha1));
                _mesa_hash_table_insert(ht, dup_cache_key, shader);
        }

        if (shader->prog_data.base->spill_size >
            v3d->prog.spill_size_per_thread) {
                /* The TIDX register we use for choosing the area to access
                 * for scratch space is: (core << 6) | (qpu << 2) | thread.
                 * Even at the minimum thread count in a particular shader,
                 * that still means multiplying the QPU count by 4.
                 */
                int total_spill_size = (v3d->screen->devinfo.qpu_count * 4 *
                                        shader->prog_data.base->spill_size);

                v3d_bo_unreference(&v3d->prog.spill_bo);
                v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
                                                  total_spill_size, "spill");
                v3d->prog.spill_size_per_thread =
                        shader->prog_data.base->spill_size;
        }

        return shader;
}

static void
v3d_free_compiled_shader(struct v3d_compiled_shader *shader)
{
        pipe_resource_reference(&shader->resource, NULL);
        ralloc_free(shader);
}

static void
v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
                     struct v3d_texture_stateobj *texstate)
{
        const struct v3d_device_info *devinfo = &v3d->screen->devinfo;

        key->num_tex_used = texstate->num_textures;
        key->num_samplers_used = texstate->num_textures;
        assert(key->num_tex_used == key->num_samplers_used);
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];

                if (!sampler)
                        continue;

                key->sampler[i].return_size =
                        v3d_get_tex_return_size(devinfo, sampler->format);

                /* For 16-bit, we set up the sampler to always return 2
                 * channels (meaning no recompiles for most state changes),
                 * while for 32-bit we actually scale the returns with the
                 * channel count.
                 */
                if (key->sampler[i].return_size == 16) {
                        key->sampler[i].return_channels = 2;
                } else {
                        key->sampler[i].return_channels = 4;
                }

                /* We let the sampler state handle the swizzle.
                 */
                key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
                key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
                key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
                key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
        }
}

static void
v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
                                struct v3d_key *key)
{
        nir_shader *s = uncompiled->base.ir.nir;

        /* Note that below we access the key's texture and sampler fields
         * using the same index. In OpenGL they are the same (they are
         * combined).
         */
        key->num_tex_used = s->info.num_textures;
        key->num_samplers_used = s->info.num_textures;
        for (int i = 0; i < s->info.num_textures; i++) {
                key->sampler[i].return_size = 16;
                key->sampler[i].return_channels = 2;

                key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
                key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
                key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
                key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
        }
}

static void
v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_job *job = v3d->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;
        nir_shader *s = v3d->prog.bind_fs->base.ir.nir;

        if (!(v3d->dirty & (V3D_DIRTY_PRIM_MODE |
                            V3D_DIRTY_BLEND |
                            V3D_DIRTY_FRAMEBUFFER |
                            V3D_DIRTY_ZSA |
                            V3D_DIRTY_RASTERIZER |
                            V3D_DIRTY_SAMPLE_STATE |
                            V3D_DIRTY_FRAGTEX |
                            V3D_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_FRAGMENT]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->is_points = (prim_mode == MESA_PRIM_POINTS);
        key->is_lines = (prim_mode >= MESA_PRIM_LINES &&
                         prim_mode <= MESA_PRIM_LINE_STRIP);
        key->line_smoothing = (key->is_lines &&
                               v3d_line_smoothing_enabled(v3d));
        key->has_gs = v3d->prog.bind_gs != NULL;
        if (v3d->blend->base.logicop_enable) {
                key->logicop_func = v3d->blend->base.logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = v3d->rasterizer->base.multisample;
                key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
                key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
        }

        key->swap_color_rb = v3d->swap_color_rb;

        for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;

                /* gl_FragColor's propagation to however many bound color
                 * buffers there are means that the shader compile needs to
                 * know what buffers are present.
                 */
                key->cbufs |= 1 << i;

                /* If logic operations are enabled then we might emit color
                 * reads and we need to know the color buffer format and
                 * swizzle for that.
                 */
                if (key->logicop_func != PIPE_LOGICOP_COPY) {
                        key->color_fmt[i].format = cbuf->format;
                        memcpy(key->color_fmt[i].swizzle,
                               v3d_get_format_swizzle(&v3d->screen->devinfo,
                                                      cbuf->format),
                               sizeof(key->color_fmt[i].swizzle));
                }

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }

                if (s->info.fs.untyped_color_outputs) {
                        if (util_format_is_pure_uint(cbuf->format))
                                key->uint_color_rb |= 1 << i;
                        else if (util_format_is_pure_sint(cbuf->format))
                                key->int_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        v3d->rasterizer->base.sprite_coord_enable;
                /* this is handled by lower_wpos_pntc */
                key->point_coord_upper_left = false;
        }

        struct v3d_compiled_shader *old_fs = v3d->prog.fs;
        v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                               v3d->prog.bind_fs);
        if (v3d->prog.fs == old_fs)
                return;

        v3d->dirty |= V3D_DIRTY_COMPILED_FS;

        if (old_fs) {
                if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
                    old_fs->prog_data.fs->flat_shade_flags) {
                        v3d->dirty |= V3D_DIRTY_FLAT_SHADE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
                    old_fs->prog_data.fs->noperspective_flags) {
                        v3d->dirty |= V3D_DIRTY_NOPERSPECTIVE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->centroid_flags !=
                    old_fs->prog_data.fs->centroid_flags) {
                        v3d->dirty |= V3D_DIRTY_CENTROID_FLAGS;
                }
        }

        if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
                v3d->dirty |= V3D_DIRTY_FS_INPUTS;
        }
}

static void
v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_gs_key local_key;
        struct v3d_gs_key *key = &local_key;

        if (!(v3d->dirty & (V3D_DIRTY_GEOMTEX |
                            V3D_DIRTY_RASTERIZER |
                            V3D_DIRTY_UNCOMPILED_GS |
                            V3D_DIRTY_PRIM_MODE |
                            V3D_DIRTY_FS_INPUTS))) {
                return;
        }

        if (!v3d->prog.bind_gs) {
                v3d->prog.gs = NULL;
                v3d->prog.gs_bin = NULL;
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_GEOMETRY]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = true;
        key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
        STATIC_ASSERT(sizeof(key->used_outputs) ==
                      sizeof(v3d->prog.fs->prog_data.fs->input_slots));
        memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
               sizeof(key->used_outputs));

        key->per_vertex_point_size =
                (prim_mode == MESA_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_uncompiled_shader *uncompiled = v3d->prog.bind_gs;
        struct v3d_compiled_shader *gs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        uncompiled);
        if (gs != v3d->prog.gs) {
                v3d->prog.gs = gs;
                v3d->dirty |= V3D_DIRTY_COMPILED_GS;
        }

        key->is_coord = true;

        /* The last bin-mode shader in the geometry pipeline only outputs
         * varyings used by transform feedback.
         */
        memcpy(key->used_outputs, uncompiled->tf_outputs,
               sizeof(*key->used_outputs) * uncompiled->num_tf_outputs);
        if (uncompiled->num_tf_outputs < key->num_used_outputs) {
                uint32_t size = sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 uncompiled->num_tf_outputs);
                memset(&key->used_outputs[uncompiled->num_tf_outputs],
                       0, size);
        }
        key->num_used_outputs = uncompiled->num_tf_outputs;

        struct v3d_compiled_shader *old_gs = v3d->prog.gs;
        struct v3d_compiled_shader *gs_bin =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        uncompiled);
        if (gs_bin != old_gs) {
                v3d->prog.gs_bin = gs_bin;
                v3d->dirty |= V3D_DIRTY_COMPILED_GS_BIN;
        }

        if (old_gs && memcmp(v3d->prog.gs->prog_data.gs->input_slots,
                             old_gs->prog_data.gs->input_slots,
                             sizeof(v3d->prog.gs->prog_data.gs->input_slots))) {
                v3d->dirty |= V3D_DIRTY_GS_INPUTS;
        }
}

static void
v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        if (!(v3d->dirty & (V3D_DIRTY_VERTTEX |
                            V3D_DIRTY_VTXSTATE |
                            V3D_DIRTY_UNCOMPILED_VS |
                            (v3d->prog.bind_gs ? 0 : V3D_DIRTY_RASTERIZER) |
                            (v3d->prog.bind_gs ? 0 : V3D_DIRTY_PRIM_MODE) |
                            (v3d->prog.bind_gs ? V3D_DIRTY_GS_INPUTS :
                                                 V3D_DIRTY_FS_INPUTS)))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_VERTEX]);
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = !v3d->prog.bind_gs;

        if (!v3d->prog.bind_gs) {
            key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
            STATIC_ASSERT(sizeof(key->used_outputs) ==
                          sizeof(v3d->prog.fs->prog_data.fs->input_slots));
            memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
                   sizeof(key->used_outputs));
        } else {
            key->num_used_outputs = v3d->prog.gs->prog_data.gs->num_inputs;
            STATIC_ASSERT(sizeof(key->used_outputs) ==
                          sizeof(v3d->prog.gs->prog_data.gs->input_slots));
            memcpy(key->used_outputs, v3d->prog.gs->prog_data.gs->input_slots,
                   sizeof(key->used_outputs));
        }

        key->per_vertex_point_size =
                (prim_mode == MESA_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        nir_shader *s = v3d->prog.bind_vs->base.ir.nir;
        uint64_t inputs_read = s->info.inputs_read;
        assert(util_bitcount(inputs_read) <= v3d->vtx->num_elements);

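        /* Attributes with BGRA-style vertex formats need their R/B channels
         * swapped when the shader loads them.
         */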
        while (inputs_read) {
                int location = u_bit_scan64(&inputs_read);
                nir_variable *var =
                        nir_find_variable_with_location(s, nir_var_shader_in, location);
                assert(var != NULL);
                int driver_location = var->data.driver_location;
                switch (v3d->vtx->pipe[driver_location].src_format) {
                case PIPE_FORMAT_B8G8R8A8_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_SNORM:
                case PIPE_FORMAT_B10G10R10A2_USCALED:
                case PIPE_FORMAT_B10G10R10A2_SSCALED:
                        key->va_swap_rb_mask |= 1 << location;
                        break;
                default:
                        break;
                }
        }

        struct v3d_uncompiled_shader *shader_state = v3d->prog.bind_vs;
        struct v3d_compiled_shader *vs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        shader_state);
        if (vs != v3d->prog.vs) {
                v3d->prog.vs = vs;
                v3d->dirty |= V3D_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;

        /* Coord shaders only output varyings used by transform feedback,
         * unless they are linked to other shaders in the geometry side
         * of the pipeline, since in that case any of the output varyings
         * could be required in later geometry stages to compute
         * gl_Position or TF outputs.
         */
        if (!v3d->prog.bind_gs) {
                memcpy(key->used_outputs, shader_state->tf_outputs,
                       sizeof(*key->used_outputs) *
                       shader_state->num_tf_outputs);
                if (shader_state->num_tf_outputs < key->num_used_outputs) {
                        uint32_t tail_bytes =
                                sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 shader_state->num_tf_outputs);
                        memset(&key->used_outputs[shader_state->num_tf_outputs],
                               0, tail_bytes);
                }
                key->num_used_outputs = shader_state->num_tf_outputs;
        } else {
                key->num_used_outputs = v3d->prog.gs_bin->prog_data.gs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.gs_bin->prog_data.gs->input_slots));
                memcpy(key->used_outputs, v3d->prog.gs_bin->prog_data.gs->input_slots,
                       sizeof(key->used_outputs));
        }

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
                                        shader_state);
        if (cs != v3d->prog.cs) {
                v3d->prog.cs = cs;
                v3d->dirty |= V3D_DIRTY_COMPILED_CS;
        }
}

void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_gs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}

void
v3d_update_compiled_cs(struct v3d_context *v3d)
{
        struct v3d_key local_key;
        struct v3d_key *key = &local_key;

        if (!(v3d->dirty & (V3D_DIRTY_UNCOMPILED_CS |
                            V3D_DIRTY_COMPTEX))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, key, &v3d->tex[PIPE_SHADER_COMPUTE]);

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, key, sizeof(*key),
                                        v3d->prog.bind_compute);
        if (cs != v3d->prog.compute) {
                v3d->prog.compute = cs;
                v3d->dirty |= V3D_DIRTY_COMPILED_CS; /* XXX */
        }
}

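/* Hashes both the variant key contents and the uncompiled shader's NIR sha1,
 * so variants of different shaders with identical keys don't collide.
 */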
static inline uint32_t
cache_hash(const void *_key, uint32_t key_size)
{
        const struct v3d_cache_key *key = (struct v3d_cache_key *) _key;

        struct mesa_sha1 ctx;
        unsigned char sha1[20];
        _mesa_sha1_init(&ctx);
        _mesa_sha1_update(&ctx, key->key, key_size);
        _mesa_sha1_update(&ctx, key->sha1, 20);
        _mesa_sha1_final(&ctx, sha1);
        return _mesa_hash_data(sha1, 20);
}

static inline bool
cache_compare(const void *_key1, const void *_key2, uint32_t key_size)
{
        const struct v3d_cache_key *key1 = (struct v3d_cache_key *) _key1;
        const struct v3d_cache_key *key2 = (struct v3d_cache_key *) _key2;

        if (memcmp(key1->key, key2->key, key_size) != 0)
            return false;

        return memcmp(key1->sha1, key2->sha1, 20) == 0;
}

static uint32_t
fs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_fs_key));
}

static uint32_t
gs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_gs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_vs_key));
}

static uint32_t
cs_cache_hash(const void *key)
{
        return cache_hash(key, sizeof(struct v3d_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_fs_key));
}

static bool
gs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_gs_key));
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_vs_key));
}

static bool
cs_cache_compare(const void *key1, const void *key2)
{
        return cache_compare(key1, key2, sizeof(struct v3d_key));
}

static void
v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = hwcso;
        nir_shader *s = so->base.ir.nir;

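        /* Free every compiled variant of this shader and drop any context
         * pointers that still reference one of them.
         */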
        hash_table_foreach(v3d->prog.cache[s->info.stage], entry) {
                const struct v3d_cache_key *cache_key = entry->key;
                struct v3d_compiled_shader *shader = entry->data;

                if (memcmp(cache_key->sha1, so->sha1, 20) != 0)
                        continue;

                if (v3d->prog.fs == shader)
                        v3d->prog.fs = NULL;
                if (v3d->prog.vs == shader)
                        v3d->prog.vs = NULL;
                if (v3d->prog.cs == shader)
                        v3d->prog.cs = NULL;
                if (v3d->prog.compute == shader)
                        v3d->prog.compute = NULL;

                _mesa_hash_table_remove(v3d->prog.cache[s->info.stage], entry);
                v3d_free_compiled_shader(shader);
        }

        ralloc_free(so->base.ir.nir);
        free(so);
}

static void
v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_fs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_FS;
}

static void
v3d_gp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_gs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_GS;
}

static void
v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_vs = hwcso;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_VS;
}

static void
v3d_compute_state_bind(struct pipe_context *pctx, void *state)
{
        struct v3d_context *v3d = v3d_context(pctx);

        v3d->prog.bind_compute = state;
        v3d->dirty |= V3D_DIRTY_UNCOMPILED_CS;
}

static void *
v3d_create_compute_state(struct pipe_context *pctx,
                         const struct pipe_compute_state *cso)
{
        return v3d_uncompiled_shader_create(pctx, cso->ir_type,
                                            (void *)cso->prog);
}

void
v3d_program_init(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        pctx->create_vs_state = v3d_shader_state_create;
        pctx->delete_vs_state = v3d_shader_state_delete;

        pctx->create_gs_state = v3d_shader_state_create;
        pctx->delete_gs_state = v3d_shader_state_delete;

        pctx->create_fs_state = v3d_shader_state_create;
        pctx->delete_fs_state = v3d_shader_state_delete;

        pctx->bind_fs_state = v3d_fp_state_bind;
        pctx->bind_gs_state = v3d_gp_state_bind;
        pctx->bind_vs_state = v3d_vp_state_bind;

        if (v3d->screen->has_csd) {
                pctx->create_compute_state = v3d_create_compute_state;
                pctx->delete_compute_state = v3d_shader_state_delete;
                pctx->bind_compute_state = v3d_compute_state_bind;
        }

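        /* One variant cache per shader stage, keyed by the per-stage key
         * struct plus the uncompiled shader's NIR sha1.
         */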
        v3d->prog.cache[MESA_SHADER_VERTEX] =
                _mesa_hash_table_create(pctx, vs_cache_hash, vs_cache_compare);
        v3d->prog.cache[MESA_SHADER_GEOMETRY] =
                _mesa_hash_table_create(pctx, gs_cache_hash, gs_cache_compare);
        v3d->prog.cache[MESA_SHADER_FRAGMENT] =
                _mesa_hash_table_create(pctx, fs_cache_hash, fs_cache_compare);
        v3d->prog.cache[MESA_SHADER_COMPUTE] =
                _mesa_hash_table_create(pctx, cs_cache_hash, cs_cache_compare);
}

void
v3d_program_fini(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        for (int i = 0; i < MESA_SHADER_STAGES; i++) {
                struct hash_table *cache = v3d->prog.cache[i];
                if (!cache)
                        continue;

                hash_table_foreach(cache, entry) {
                        struct v3d_compiled_shader *shader = entry->data;
                        v3d_free_compiled_shader(shader);
                        _mesa_hash_table_remove(cache, entry);
                }
        }

        v3d_bo_unreference(&v3d->prog.spill_bo);
}