1 /*
2  * Copyright © 2014-2017 Broadcom
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <inttypes.h>
25 #include "util/format/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "util/u_upload_mgr.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "compiler/nir/nir_serialize.h"
35 #include "nir/tgsi_to_nir.h"
36 #include "compiler/v3d_compiler.h"
37 #include "v3d_context.h"
38 /* packets here are the same across V3D versions. */
39 #include "broadcom/cle/v3d_packet_v42_pack.h"
40 
41 static struct v3d_compiled_shader *
42 v3d_get_compiled_shader(struct v3d_context *v3d,
43                         struct v3d_key *key, size_t key_size,
44                         struct v3d_uncompiled_shader *uncompiled);
45 
46 static void
47 v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
48                                 struct v3d_key *key);
49 
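/* Maps a NIR driver_location back to its gl_varying_slot by walking the
 * shader's output variables (including compact arrays). Returns -1 if no
 * output uses that location.
 */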
50 static gl_varying_slot
51 v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
52 {
53         nir_foreach_shader_out_variable(var, s) {
54                 if (var->data.driver_location == driver_location) {
55                         return var->data.location;
56                 }
57 
58                 /* For compact arrays, we have more than one location to
59                  * check.
60                  */
61                 if (var->data.compact) {
62                         assert(glsl_type_is_array(var->type));
63                         for (int i = 0; i < DIV_ROUND_UP(glsl_array_size(var->type), 4); i++) {
64                                 if ((var->data.driver_location + i) == driver_location) {
65                                         return var->data.location;
66                                 }
67                         }
68                 }
69         }
70 
71         return -1;
72 }
73 
74 /**
75  * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
76  *
77  * A shader can have 16 of these specs, and each one of them can write up to
78  * 16 dwords.  Since we allow a total of 64 transform feedback output
79  * components (not 16 vectors), we have to group the writes of multiple
80  * varyings together in a single data spec.
81  */
82 static void
83 v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
84                                    const struct pipe_stream_output_info *stream_output)
85 {
86         if (!stream_output->num_outputs)
87                 return;
88 
89         struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
90         int slot_count = 0;
91 
92         for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
93                 uint32_t buffer_offset = 0;
94                 uint32_t vpm_start = slot_count;
95 
96                 for (int i = 0; i < stream_output->num_outputs; i++) {
97                         const struct pipe_stream_output *output =
98                                 &stream_output->output[i];
99 
100                         if (output->output_buffer != buffer)
101                                 continue;
102 
103                         /* We assume that the SO outputs appear in increasing
104                          * order in the buffer.
105                          */
106                         assert(output->dst_offset >= buffer_offset);
107 
108                         /* Pad any undefined slots in the output */
109                         for (int j = buffer_offset; j < output->dst_offset; j++) {
110                                 slots[slot_count] =
111                                         v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
112                                 slot_count++;
113                                 buffer_offset++;
114                         }
115 
116                         /* Set the coordinate shader up to output the
117                          * components of this varying.
118                          */
119                         for (int j = 0; j < output->num_components; j++) {
120                                 gl_varying_slot slot =
121                                         v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);
122 
123                                 slots[slot_count] =
124                                         v3d_slot_from_slot_and_component(slot,
125                                                                          output->start_component + j);
126                                 slot_count++;
127                                 buffer_offset++;
128                         }
129                 }
130 
131                 uint32_t vpm_size = slot_count - vpm_start;
132                 if (!vpm_size)
133                         continue;
134 
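                /* The coordinate shader's VPM output starts with the fixed
                 * [X, Y, Z, W, Xs, Ys] block, so TF values begin at offset 6.
                 */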
135                 uint32_t vpm_start_offset = vpm_start + 6;
136 
137                 while (vpm_size) {
138                         uint32_t write_size = MIN2(vpm_size, 1 << 4);
139 
140                         struct V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
141                                 /* We need the offset from the coordinate shader's VPM
142                                  * output block, which has the [X, Y, Z, W, Xs, Ys]
143                                  * values at the start.
144                                  */
145                                 .first_shaded_vertex_value_to_output = vpm_start_offset,
146                                 .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
147                                 .output_buffer_to_write_to = buffer,
148                         };
149 
150                         /* GFXH-1559 */
151                         assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
152                                so->num_tf_specs != 0);
153 
154                         assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
155                         V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
156                                                                        (void *)&so->tf_specs[so->num_tf_specs],
157                                                                        &unpacked);
158 
159                         /* If point size is being written by the shader, then
160                          * all the VPM start offsets are shifted up by one.
161                          * We won't know that until the variant is compiled,
162                          * though.
163                          */
164                         unpacked.first_shaded_vertex_value_to_output++;
165 
166                         /* GFXH-1559 */
167                         assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
168                                so->num_tf_specs != 0);
169 
170                         V3D42_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
171                                                                        (void *)&so->tf_specs_psiz[so->num_tf_specs],
172                                                                        &unpacked);
173                         so->num_tf_specs++;
174                         vpm_start_offset += write_size;
175                         vpm_size -= write_size;
176                 }
177                 so->base.stream_output.stride[buffer] =
178                         stream_output->stride[buffer];
179         }
180 
181         so->num_tf_outputs = slot_count;
182         so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
183                                       slot_count);
184         memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
185 }
186 
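/* Type-size callback for nir_lower_io: counts attribute slots per variable. */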
187 static int
188 type_size(const struct glsl_type *type, bool bindless)
189 {
190         return glsl_count_attribute_slots(type, false);
191 }
192 
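/* At precompile time we don't know the real consumers of the outputs, so
 * mark every component of every output variable as used in the key.
 */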
193 static void
194 precompile_all_outputs(nir_shader *s,
195                        struct v3d_varying_slot *outputs,
196                        uint8_t *num_outputs)
197 {
198         nir_foreach_shader_out_variable(var, s) {
199                 const int array_len = glsl_type_is_vector_or_scalar(var->type) ?
200                         1 : MAX2(glsl_get_length(var->type), 1);
201                 for (int j = 0; j < array_len; j++) {
202                         const int slot = var->data.location + j;
203                         const int num_components =
204                                 glsl_get_components(var->type);
205                         for (int i = 0; i < num_components; i++) {
206                                 const int swiz = var->data.location_frac + i;
207                                 outputs[(*num_outputs)++] =
208                                         v3d_slot_from_slot_and_component(slot,
209                                                                          swiz);
210                         }
211                 }
212         }
213 }
214 
215 /**
216  * Precompiles a shader variant at shader state creation time if
217  * V3D_DEBUG=precompile is set.  Used for shader-db
218  * (https://gitlab.freedesktop.org/mesa/shader-db)
219  */
220 static void
221 v3d_shader_precompile(struct v3d_context *v3d,
222                       struct v3d_uncompiled_shader *so)
223 {
224         nir_shader *s = so->base.ir.nir;
225 
226         if (s->info.stage == MESA_SHADER_FRAGMENT) {
227                 struct v3d_fs_key key = {
228                 };
229 
230                 nir_foreach_shader_out_variable(var, s) {
231                         if (var->data.location == FRAG_RESULT_COLOR) {
232                                 key.cbufs |= 1 << 0;
233                         } else if (var->data.location >= FRAG_RESULT_DATA0) {
234                                 key.cbufs |= 1 << (var->data.location -
235                                                    FRAG_RESULT_DATA0);
236                         }
237                 }
238 
239                 key.logicop_func = PIPE_LOGICOP_COPY;
240 
241                 v3d_setup_shared_precompile_key(so, &key.base);
242                 v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
243         } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
244                 struct v3d_gs_key key = {
245                         .base.is_last_geometry_stage = true,
246                 };
247 
248                 v3d_setup_shared_precompile_key(so, &key.base);
249 
250                 precompile_all_outputs(s,
251                                        key.used_outputs,
252                                        &key.num_used_outputs);
253 
254                 v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
255 
256                 /* Compile GS bin shader: only position (XXX: include TF) */
257                 key.is_coord = true;
258                 key.num_used_outputs = 0;
259                 for (int i = 0; i < 4; i++) {
260                         key.used_outputs[key.num_used_outputs++] =
261                                 v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
262                                                                  i);
263                 }
264                 v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
265         } else if (s->info.stage == MESA_SHADER_VERTEX) {
266                 struct v3d_vs_key key = {
267                         /* Emit fixed function outputs */
268                         .base.is_last_geometry_stage = true,
269                 };
270 
271                 v3d_setup_shared_precompile_key(so, &key.base);
272 
273                 precompile_all_outputs(s,
274                                        key.used_outputs,
275                                        &key.num_used_outputs);
276 
277                 v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
278 
279                 /* Compile VS bin shader: only position (XXX: include TF) */
280                 key.is_coord = true;
281                 key.num_used_outputs = 0;
282                 for (int i = 0; i < 4; i++) {
283                         key.used_outputs[key.num_used_outputs++] =
284                                 v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
285                                                                  i);
286                 }
287                 v3d_get_compiled_shader(v3d, &key.base, sizeof(key), so);
288         } else {
289                 assert(s->info.stage == MESA_SHADER_COMPUTE);
290                 struct v3d_key key = { 0 };
291                 v3d_setup_shared_precompile_key(so, &key);
292                 v3d_get_compiled_shader(v3d, &key, sizeof(key), so);
293         }
294 }
295 
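/* Rewrites load_uniform base and offset from vec4 units to bytes (x16). */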
296 static bool
297 lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_intrinsic_instr *intr,
298                                  void *_state)
299 {
300         if (intr->intrinsic != nir_intrinsic_load_uniform)
301                 return false;
302 
303         b->cursor = nir_before_instr(&intr->instr);
304         nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
305         nir_src_rewrite(&intr->src[0], nir_ishl_imm(b, intr->src[0].ssa, 4));
306         return true;
307 }
308 
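/* For texture instructions that don't need a sampler, stash the texture
 * index in backend_flags, which lower_tex_packing uses for its decisions.
 */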
309 static bool
310 lower_textures_cb(nir_builder *b, nir_instr *instr, void *_state)
311 {
312         if (instr->type != nir_instr_type_tex)
313                 return false;
314 
315         nir_tex_instr *tex = nir_instr_as_tex(instr);
316         if (nir_tex_instr_need_sampler(tex))
317                 return false;
318 
319         /* Use the texture index as sampler index for the purposes of
320          * lower_tex_packing, since in GL we currently make packing
321          * decisions based on texture format.
322          */
323         tex->backend_flags = tex->texture_index;
324         return true;
325 }
326 
327 static bool
328 v3d_nir_lower_uniform_offset_to_bytes(nir_shader *s)
329 {
330         return nir_shader_intrinsics_pass(s, lower_uniform_offset_to_bytes_cb,
331                                             nir_metadata_control_flow, NULL);
332 }
333 
334 static bool
335 v3d_nir_lower_textures(nir_shader *s)
336 {
337         return nir_shader_instructions_pass(s, lower_textures_cb,
338                                             nir_metadata_control_flow, NULL);
339 }
340 
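/* Creates the uncompiled shader state: translates TGSI to NIR if needed,
 * runs the stage-independent NIR lowering and optimization passes, and
 * hashes the serialized NIR for use as a cache key.
 */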
341 static void *
342 v3d_uncompiled_shader_create(struct pipe_context *pctx,
343                              enum pipe_shader_ir type, void *ir)
344 {
345         struct v3d_context *v3d = v3d_context(pctx);
346         struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
347         if (!so)
348                 return NULL;
349 
350         so->program_id = v3d->next_uncompiled_program_id++;
351 
352         nir_shader *s;
353 
354         if (type == PIPE_SHADER_IR_NIR) {
355                 /* The backend takes ownership of the NIR shader on state
356                  * creation.
357                  */
358                 s = ir;
359         } else {
360                 assert(type == PIPE_SHADER_IR_TGSI);
361 
362                 if (V3D_DBG(TGSI)) {
363                         fprintf(stderr, "prog %d TGSI:\n",
364                                 so->program_id);
365                         tgsi_dump(ir, 0);
366                         fprintf(stderr, "\n");
367                 }
368                 s = tgsi_to_nir(ir, pctx->screen, false);
369         }
370 
371         if (s->info.stage == MESA_SHADER_KERNEL)
372                 s->info.stage = MESA_SHADER_COMPUTE;
373 
374         if (s->info.stage != MESA_SHADER_VERTEX &&
375             s->info.stage != MESA_SHADER_GEOMETRY) {
376                 NIR_PASS(_, s, nir_lower_io,
377                          nir_var_shader_in | nir_var_shader_out,
378                          type_size, (nir_lower_io_options)0);
379         }
380 
381         NIR_PASS(_, s, nir_normalize_cubemap_coords);
382 
383         NIR_PASS(_, s, nir_lower_load_const_to_scalar);
384 
385         v3d_optimize_nir(NULL, s);
386 
387         NIR_PASS(_, s, nir_lower_var_copies);
388 
389         /* Get rid of base CS sys vals */
390         if (s->info.stage == MESA_SHADER_COMPUTE) {
391                 struct nir_lower_compute_system_values_options cs_options = {
392                         .has_base_global_invocation_id = false,
393                         .has_base_workgroup_id = false,
394                 };
395                 NIR_PASS(_, s, nir_lower_compute_system_values, &cs_options);
396         }
397 
398         /* Get rid of split copies */
399         v3d_optimize_nir(NULL, s);
400 
401         NIR_PASS(_, s, nir_remove_dead_variables, nir_var_function_temp, NULL);
402 
403         NIR_PASS(_, s, nir_lower_frexp);
404 
405         /* Since we can't expose pipe_caps.packed_uniforms, the state tracker
406          * will produce uniform intrinsics with offsets in vec4 units, but
407          * our compiler expects to work in units of bytes.
408          */
409         NIR_PASS(_, s, v3d_nir_lower_uniform_offset_to_bytes);
410 
411         NIR_PASS(_, s, v3d_nir_lower_textures);
412 
413         /* Garbage collect dead instructions */
414         nir_sweep(s);
415 
416         so->base.type = PIPE_SHADER_IR_NIR;
417         so->base.ir.nir = s;
418 
419         /* Generate sha1 from NIR for caching */
420         struct blob blob;
421         blob_init(&blob);
422         nir_serialize(&blob, s, true);
423         assert(!blob.out_of_memory);
424         _mesa_sha1_compute(blob.data, blob.size, so->sha1);
425         blob_finish(&blob);
426 
427         if (V3D_DBG(NIR) || v3d_debug_flag_for_shader_stage(s->info.stage)) {
428                 fprintf(stderr, "%s prog %d NIR:\n",
429                         gl_shader_stage_name(s->info.stage),
430                         so->program_id);
431                 nir_print_shader(s, stderr);
432                 fprintf(stderr, "\n");
433         }
434 
435         if (V3D_DBG(PRECOMPILE))
436                 v3d_shader_precompile(v3d, so);
437 
438         return so;
439 }
440 
441 static void
442 v3d_shader_debug_output(const char *message, void *data)
443 {
444         struct pipe_context *ctx = data;
445 
446         util_debug_message(&ctx->debug, SHADER_INFO, "%s", message);
447 }
448 
449 static void *
450 v3d_shader_state_create(struct pipe_context *pctx,
451                         const struct pipe_shader_state *cso)
452 {
453         struct v3d_uncompiled_shader *so =
454                 v3d_uncompiled_shader_create(pctx,
455                                              cso->type,
456                                              (cso->type == PIPE_SHADER_IR_TGSI ?
457                                               (void *)cso->tokens :
458                                               cso->ir.nir));
459 
460         v3d_set_transform_feedback_outputs(so, &cso->stream_output);
461 
462         return so;
463 }
464 
465 /* Key used with the RAM cache */
466 struct v3d_cache_key {
467         struct v3d_key *key;
468         unsigned char sha1[20];
469 };
470 
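/* Returns the compiled variant for the given key, first checking the in-RAM
 * cache (keyed on the state key plus the NIR SHA-1) and the disk cache, and
 * compiling with v3d_compile() otherwise.
 */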
471 struct v3d_compiled_shader *
472 v3d_get_compiled_shader(struct v3d_context *v3d,
473                         struct v3d_key *key,
474                         size_t key_size,
475                         struct v3d_uncompiled_shader *uncompiled)
476 {
477         nir_shader *s = uncompiled->base.ir.nir;
478         struct hash_table *ht = v3d->prog.cache[s->info.stage];
479         struct v3d_cache_key cache_key;
480         cache_key.key = key;
481         memcpy(cache_key.sha1, uncompiled->sha1, sizeof(cache_key.sha1));
482         struct hash_entry *entry = _mesa_hash_table_search(ht, &cache_key);
483         if (entry)
484                 return entry->data;
485 
486         int variant_id =
487                 p_atomic_inc_return(&uncompiled->compiled_variant_count);
488 
489         struct v3d_compiled_shader *shader = NULL;
490 
491 #ifdef ENABLE_SHADER_CACHE
492         shader = v3d_disk_cache_retrieve(v3d, key, uncompiled);
493 #endif
494         if (!shader) {
495                 shader = rzalloc(NULL, struct v3d_compiled_shader);
496 
497                 int program_id = uncompiled->program_id;
498                 uint64_t *qpu_insts;
499 
500                 qpu_insts = v3d_compile(v3d->screen->compiler, key,
501                                         &shader->prog_data.base, s,
502                                         v3d_shader_debug_output,
503                                         v3d,
504                                         program_id, variant_id,
505                                         &shader->qpu_size);
506 
507                 /* qpu_insts being NULL can happen if the register allocation
508                  * failed. At this point we can't really trigger an OpenGL API
509                  * error, as the final compilation could happen on the draw
510                  * call. So let's at least assert, so debug builds finish at
511                  * this point.
512                  */
513                 assert(qpu_insts);
514                 ralloc_steal(shader, shader->prog_data.base);
515 
516                 if (shader->qpu_size) {
517                         u_upload_data(v3d->state_uploader, 0, shader->qpu_size, 8,
518                                       qpu_insts, &shader->offset, &shader->resource);
519                 }
520 
521 #ifdef ENABLE_SHADER_CACHE
522                 v3d_disk_cache_store(v3d, key, uncompiled,
523                                      shader, qpu_insts, shader->qpu_size);
524 #endif
525 
526                 free(qpu_insts);
527         }
528 
529         v3d_set_shader_uniform_dirty_flags(shader);
530 
531         if (ht) {
532                 struct v3d_cache_key *dup_cache_key =
533                         ralloc_size(shader, sizeof(struct v3d_cache_key));
534                 dup_cache_key->key = ralloc_memdup(shader, cache_key.key,
535                                                    key_size);
536                 memcpy(dup_cache_key->sha1, cache_key.sha1, sizeof(dup_cache_key->sha1));
537                 _mesa_hash_table_insert(ht, dup_cache_key, shader);
538         }
539 
540         if (shader->prog_data.base->spill_size >
541             v3d->prog.spill_size_per_thread) {
542                 /* The TIDX register we use for choosing the area to access
543                  * for scratch space is: (core << 6) | (qpu << 2) | thread.
544                  * Even at the minimum thread count in a particular shader,
545                  * that means we still multiply the QPU count by 4.
546                  */
547                 int total_spill_size = (v3d->screen->devinfo.qpu_count * 4 *
548                                         shader->prog_data.base->spill_size);
549 
550                 v3d_bo_unreference(&v3d->prog.spill_bo);
551                 v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
552                                                   total_spill_size, "spill");
553                 v3d->prog.spill_size_per_thread =
554                         shader->prog_data.base->spill_size;
555         }
556 
557         return shader;
558 }
559 
560 static void
561 v3d_free_compiled_shader(struct v3d_compiled_shader *shader)
562 {
563         pipe_resource_reference(&shader->resource, NULL);
564         ralloc_free(shader);
565 }
566 
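/* Fills the stage-independent parts of the key from the bound sampler views:
 * per-sampler return size/channels and the texture swizzles.
 */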
567 static void
568 v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
569                      struct v3d_texture_stateobj *texstate)
570 {
571         const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
572 
573         key->num_tex_used = texstate->num_textures;
574         key->num_samplers_used = texstate->num_textures;
575         assert(key->num_tex_used == key->num_samplers_used);
576         for (int i = 0; i < texstate->num_textures; i++) {
577                 struct pipe_sampler_view *sampler = texstate->textures[i];
578 
579                 if (!sampler)
580                         continue;
581 
582                 key->sampler[i].return_size =
583                         v3d_get_tex_return_size(devinfo, sampler->format);
584 
585                 /* For 16-bit, we set up the sampler to always return 2
586                  * channels (meaning no recompiles for most state changes),
587                  * while for 32-bit we scale the returns with the channel count.
588                  */
589                 if (key->sampler[i].return_size == 16) {
590                         key->sampler[i].return_channels = 2;
591                 } else {
592                         key->sampler[i].return_channels = 4;
593                 }
594 
595                 /* We let the sampler state handle the swizzle.
596                  */
597                 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
598                 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
599                 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
600                 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
601         }
602 }
603 
604 static void
605 v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
606                                 struct v3d_key *key)
607 {
608         nir_shader *s = uncompiled->base.ir.nir;
609 
610         /* The shader may have gaps in the texture bindings, so figure out
611          * the largest binding in use and set up the number of textures and
612          * samplers from there instead of just the texture count from shader
613          * info.
614          */
615         key->num_tex_used = 0;
616         key->num_samplers_used = 0;
617         for (int i = V3D_MAX_TEXTURE_SAMPLERS - 1; i >= 0; i--) {
618                 if (s->info.textures_used[0] & (1 << i)) {
619                         key->num_tex_used = i + 1;
620                         key->num_samplers_used = i + 1;
621                         break;
622                 }
623         }
624 
625         /* Note that below we access the key's texture and sampler fields
626          * using the same index. In OpenGL they are the same (they are
627          * combined).
628          */
629         for (int i = 0; i < s->info.num_textures; i++) {
630                 key->sampler[i].return_size = 16;
631                 key->sampler[i].return_channels = 2;
632 
633                 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
634                 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
635                 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
636                 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
637         }
638 }
639 
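/* Picks (compiling if necessary) the fragment shader variant for the current
 * state, and flags the derived dirty bits when the variant changes.
 */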
640 static void
641 v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
642 {
643         struct v3d_job *job = v3d->job;
644         struct v3d_fs_key local_key;
645         struct v3d_fs_key *key = &local_key;
646         nir_shader *s = v3d->prog.bind_fs->base.ir.nir;
647 
648         if (!(v3d->dirty & (V3D_DIRTY_PRIM_MODE |
649                             V3D_DIRTY_BLEND |
650                             V3D_DIRTY_FRAMEBUFFER |
651                             V3D_DIRTY_ZSA |
652                             V3D_DIRTY_OQ |
653                             V3D_DIRTY_RASTERIZER |
654                             V3D_DIRTY_SAMPLE_STATE |
655                             V3D_DIRTY_FRAGTEX |
656                             V3D_DIRTY_UNCOMPILED_FS))) {
657                 return;
658         }
659 
660         memset(key, 0, sizeof(*key));
661         v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_FRAGMENT]);
662         key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
663         key->is_points = (prim_mode == MESA_PRIM_POINTS);
664         key->is_lines = (prim_mode >= MESA_PRIM_LINES &&
665                          prim_mode <= MESA_PRIM_LINE_STRIP);
666         key->line_smoothing = (key->is_lines &&
667                                v3d_line_smoothing_enabled(v3d));
668         key->has_gs = v3d->prog.bind_gs != NULL;
669         if (v3d->blend->base.logicop_enable) {
670                 key->logicop_func = v3d->blend->base.logicop_func;
671         } else {
672                 key->logicop_func = PIPE_LOGICOP_COPY;
673         }
674         if (job->msaa) {
675                 key->msaa = v3d->rasterizer->base.multisample;
676                 key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
677                 key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
678         }
679 
680         key->swap_color_rb = v3d->swap_color_rb;
681         key->can_earlyz_with_discard = s->info.fs.uses_discard &&
682                 (!v3d->zsa || !job->zsbuf || !v3d->zsa->base.depth_enabled ||
683                  !v3d->zsa->base.depth_writemask) &&
684                 !(v3d->active_queries && v3d->current_oq);
685 
686         for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
687                 struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
688                 if (!cbuf)
689                         continue;
690 
691                 /* gl_FragColor's propagation to however many bound color
692                  * buffers there are means that the shader compile needs to
693                  * know what buffers are present.
694                  */
695                 key->cbufs |= 1 << i;
696 
697                 /* If logic operations are enabled then we might emit color
698                  * reads and we need to know the color buffer format and
699                  * swizzle for that.
700                  */
701                 if (key->logicop_func != PIPE_LOGICOP_COPY) {
702                         key->color_fmt[i].format = cbuf->format;
703                         memcpy(key->color_fmt[i].swizzle,
704                                v3d_get_format_swizzle(&v3d->screen->devinfo,
705                                                        cbuf->format),
706                                sizeof(key->color_fmt[i].swizzle));
707                 }
708 
709                 const struct util_format_description *desc =
710                         util_format_description(cbuf->format);
711 
712                 if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
713                     desc->channel[0].size == 32) {
714                         key->f32_color_rb |= 1 << i;
715                 }
716 
717                 if (s->info.fs.untyped_color_outputs) {
718                         if (util_format_is_pure_uint(cbuf->format))
719                                 key->uint_color_rb |= 1 << i;
720                         else if (util_format_is_pure_sint(cbuf->format))
721                                 key->int_color_rb |= 1 << i;
722                 }
723         }
724 
725         if (key->is_points) {
726                 key->point_sprite_mask =
727                         v3d->rasterizer->base.sprite_coord_enable;
728                 /* this is handled by lower_wpos_pntc */
729                 key->point_coord_upper_left = false;
730         }
731 
732         struct v3d_compiled_shader *old_fs = v3d->prog.fs;
733         v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
734                                                v3d->prog.bind_fs);
735         if (v3d->prog.fs == old_fs)
736                 return;
737 
738         v3d->dirty |= V3D_DIRTY_COMPILED_FS;
739 
740         if (old_fs) {
741                 if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
742                     old_fs->prog_data.fs->flat_shade_flags) {
743                         v3d->dirty |= V3D_DIRTY_FLAT_SHADE_FLAGS;
744                 }
745 
746                 if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
747                     old_fs->prog_data.fs->noperspective_flags) {
748                         v3d->dirty |= V3D_DIRTY_NOPERSPECTIVE_FLAGS;
749                 }
750 
751                 if (v3d->prog.fs->prog_data.fs->centroid_flags !=
752                     old_fs->prog_data.fs->centroid_flags) {
753                         v3d->dirty |= V3D_DIRTY_CENTROID_FLAGS;
754                 }
755         }
756 
757         if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
758                              old_fs->prog_data.fs->input_slots,
759                              sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
760                 v3d->dirty |= V3D_DIRTY_FS_INPUTS;
761         }
762 }
763 
764 static void
765 v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
766 {
767         struct v3d_gs_key local_key;
768         struct v3d_gs_key *key = &local_key;
769 
770         if (!(v3d->dirty & (V3D_DIRTY_GEOMTEX |
771                             V3D_DIRTY_RASTERIZER |
772                             V3D_DIRTY_UNCOMPILED_GS |
773                             V3D_DIRTY_PRIM_MODE |
774                             V3D_DIRTY_FS_INPUTS))) {
775                 return;
776         }
777 
778         if (!v3d->prog.bind_gs) {
779                 v3d->prog.gs = NULL;
780                 v3d->prog.gs_bin = NULL;
781                 return;
782         }
783 
784         memset(key, 0, sizeof(*key));
785         v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_GEOMETRY]);
786         key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
787         key->base.is_last_geometry_stage = true;
788         key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
789         STATIC_ASSERT(sizeof(key->used_outputs) ==
790                       sizeof(v3d->prog.fs->prog_data.fs->input_slots));
791         memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
792                sizeof(key->used_outputs));
793 
794         key->per_vertex_point_size =
795                 (prim_mode == MESA_PRIM_POINTS &&
796                  v3d->rasterizer->base.point_size_per_vertex);
797 
798         struct v3d_uncompiled_shader *uncompiled = v3d->prog.bind_gs;
799         struct v3d_compiled_shader *gs =
800                 v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
801                                         uncompiled);
802         if (gs != v3d->prog.gs) {
803                 v3d->prog.gs = gs;
804                 v3d->dirty |= V3D_DIRTY_COMPILED_GS;
805         }
806 
807         key->is_coord = true;
808 
809         /* The last bin-mode shader in the geometry pipeline only outputs
810          * varyings used by transform feedback.
811          */
812         if (uncompiled->num_tf_outputs > 0) {
813                 memcpy(key->used_outputs, uncompiled->tf_outputs,
814                        sizeof(*key->used_outputs) * uncompiled->num_tf_outputs);
815         }
816         if (uncompiled->num_tf_outputs < key->num_used_outputs) {
817                 uint32_t size = sizeof(*key->used_outputs) *
818                                 (key->num_used_outputs -
819                                  uncompiled->num_tf_outputs);
820                 memset(&key->used_outputs[uncompiled->num_tf_outputs],
821                        0, size);
822         }
823         key->num_used_outputs = uncompiled->num_tf_outputs;
824 
825         struct v3d_compiled_shader *old_gs = v3d->prog.gs;
826         struct v3d_compiled_shader *gs_bin =
827                 v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
828                                         uncompiled);
829         if (gs_bin != old_gs) {
830                 v3d->prog.gs_bin = gs_bin;
831                 v3d->dirty |= V3D_DIRTY_COMPILED_GS_BIN;
832         }
833 
834         if (old_gs && memcmp(v3d->prog.gs->prog_data.gs->input_slots,
835                              old_gs->prog_data.gs->input_slots,
836                              sizeof(v3d->prog.gs->prog_data.gs->input_slots))) {
837                 v3d->dirty |= V3D_DIRTY_GS_INPUTS;
838         }
839 }
840 
841 static void
842 v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
843 {
844         struct v3d_vs_key local_key;
845         struct v3d_vs_key *key = &local_key;
846 
847         if (!(v3d->dirty & (V3D_DIRTY_VERTTEX |
848                             V3D_DIRTY_VTXSTATE |
849                             V3D_DIRTY_UNCOMPILED_VS |
850                             (v3d->prog.bind_gs ? 0 : V3D_DIRTY_RASTERIZER) |
851                             (v3d->prog.bind_gs ? 0 : V3D_DIRTY_PRIM_MODE) |
852                             (v3d->prog.bind_gs ? V3D_DIRTY_GS_INPUTS :
853                                                  V3D_DIRTY_FS_INPUTS)))) {
854                 return;
855         }
856 
857         memset(key, 0, sizeof(*key));
858         v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_VERTEX]);
859         key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
860         key->base.is_last_geometry_stage = !v3d->prog.bind_gs;
861 
862         if (!v3d->prog.bind_gs) {
863             key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
864             STATIC_ASSERT(sizeof(key->used_outputs) ==
865                           sizeof(v3d->prog.fs->prog_data.fs->input_slots));
866             memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
867                    sizeof(key->used_outputs));
868         } else {
869             key->num_used_outputs = v3d->prog.gs->prog_data.gs->num_inputs;
870             STATIC_ASSERT(sizeof(key->used_outputs) ==
871                           sizeof(v3d->prog.gs->prog_data.gs->input_slots));
872             memcpy(key->used_outputs, v3d->prog.gs->prog_data.gs->input_slots,
873                    sizeof(key->used_outputs));
874         }
875 
876         key->per_vertex_point_size =
877                 (prim_mode == MESA_PRIM_POINTS &&
878                  v3d->rasterizer->base.point_size_per_vertex);
879 
880         nir_shader *s = v3d->prog.bind_vs->base.ir.nir;
881         uint64_t inputs_read = s->info.inputs_read;
882         assert(util_bitcount(inputs_read) <= v3d->vtx->num_elements);
883 
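        /* Vertex formats with blue/red swapped (BGRA-style) need the shader
         * to swizzle the attribute, so record those locations in the key.
         */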
884         while (inputs_read) {
885                 int location = u_bit_scan64(&inputs_read);
886                 nir_variable *var =
887                         nir_find_variable_with_location(s, nir_var_shader_in, location);
888                 assert(var != NULL);
889                 int driver_location = var->data.driver_location;
890                 switch (v3d->vtx->pipe[driver_location].src_format) {
891                 case PIPE_FORMAT_B8G8R8A8_UNORM:
892                 case PIPE_FORMAT_B10G10R10A2_UNORM:
893                 case PIPE_FORMAT_B10G10R10A2_SNORM:
894                 case PIPE_FORMAT_B10G10R10A2_USCALED:
895                 case PIPE_FORMAT_B10G10R10A2_SSCALED:
896                         key->va_swap_rb_mask |= 1 << location;
897                         break;
898                 default:
899                         break;
900                 }
901         }
902 
903         struct v3d_uncompiled_shader *shader_state = v3d->prog.bind_vs;
904         struct v3d_compiled_shader *vs =
905                 v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
906                                         shader_state);
907         if (vs != v3d->prog.vs) {
908                 v3d->prog.vs = vs;
909                 v3d->dirty |= V3D_DIRTY_COMPILED_VS;
910         }
911 
912         key->is_coord = true;
913 
914         /* Coord shaders only output varyings used by transform feedback,
915          * unless they are linked to other shaders in the geometry side
916          * of the pipeline, since in that case any of the output varyings
917          * could be required in later geometry stages to compute
918          * gl_Position or TF outputs.
919          */
920         if (!v3d->prog.bind_gs) {
921                 if (shader_state->num_tf_outputs > 0) {
922                         memcpy(key->used_outputs, shader_state->tf_outputs,
923                                sizeof(*key->used_outputs) *
924                                shader_state->num_tf_outputs);
925                 }
926                 if (shader_state->num_tf_outputs < key->num_used_outputs) {
927                         uint32_t tail_bytes =
928                                 sizeof(*key->used_outputs) *
929                                 (key->num_used_outputs -
930                                  shader_state->num_tf_outputs);
931                         memset(&key->used_outputs[shader_state->num_tf_outputs],
932                                0, tail_bytes);
933                 }
934                 key->num_used_outputs = shader_state->num_tf_outputs;
935         } else {
936                 key->num_used_outputs = v3d->prog.gs_bin->prog_data.gs->num_inputs;
937                 STATIC_ASSERT(sizeof(key->used_outputs) ==
938                               sizeof(v3d->prog.gs_bin->prog_data.gs->input_slots));
939                 memcpy(key->used_outputs, v3d->prog.gs_bin->prog_data.gs->input_slots,
940                        sizeof(key->used_outputs));
941         }
942 
943         struct v3d_compiled_shader *cs =
944                 v3d_get_compiled_shader(v3d, &key->base, sizeof(*key),
945                                         shader_state);
946         if (cs != v3d->prog.cs) {
947                 v3d->prog.cs = cs;
948                 v3d->dirty |= V3D_DIRTY_COMPILED_CS;
949         }
950 }
951 
952 void
953 v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
954 {
955         v3d_update_compiled_fs(v3d, prim_mode);
956         v3d_update_compiled_gs(v3d, prim_mode);
957         v3d_update_compiled_vs(v3d, prim_mode);
958 }
959 
960 void
961 v3d_update_compiled_cs(struct v3d_context *v3d)
962 {
963         struct v3d_key local_key;
964         struct v3d_key *key = &local_key;
965 
966         if (!(v3d->dirty & (V3D_DIRTY_UNCOMPILED_CS |
967                             V3D_DIRTY_COMPTEX))) {
968                 return;
969         }
970 
971         memset(key, 0, sizeof(*key));
972         v3d_setup_shared_key(v3d, key, &v3d->tex[PIPE_SHADER_COMPUTE]);
973 
974         struct v3d_compiled_shader *cs =
975                 v3d_get_compiled_shader(v3d, key, sizeof(*key),
976                                         v3d->prog.bind_compute);
977         if (cs != v3d->prog.compute) {
978                 v3d->prog.compute = cs;
979                 v3d->dirty |= V3D_DIRTY_COMPILED_CS; /* XXX */
980         }
981 }
982 
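/* Hashes a v3d_cache_key by SHA-1-ing the state key followed by the shader's
 * NIR sha1.
 */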
983 static inline uint32_t
984 cache_hash(const void *_key, uint32_t key_size)
985 {
986         const struct v3d_cache_key *key = (struct v3d_cache_key *) _key;
987 
988         struct mesa_sha1 ctx;
989         unsigned char sha1[20];
990         _mesa_sha1_init(&ctx);
991         _mesa_sha1_update(&ctx, key->key, key_size);
992         _mesa_sha1_update(&ctx, key->sha1, 20);
993         _mesa_sha1_final(&ctx, sha1);
994         return _mesa_hash_data(sha1, 20);
995 }
996 
997 static inline bool
998 cache_compare(const void *_key1, const void *_key2, uint32_t key_size)
999 {
1000         const struct v3d_cache_key *key1 = (struct v3d_cache_key *) _key1;
1001         const struct v3d_cache_key *key2 = (struct v3d_cache_key *) _key2;
1002 
1003         if (memcmp(key1->key, key2->key, key_size) != 0)
1004             return false;
1005 
1006         return memcmp(key1->sha1, key2->sha1, 20) == 0;
1007 }
1008 
1009 static uint32_t
1010 fs_cache_hash(const void *key)
1011 {
1012         return cache_hash(key, sizeof(struct v3d_fs_key));
1013 }
1014 
1015 static uint32_t
1016 gs_cache_hash(const void *key)
1017 {
1018         return cache_hash(key, sizeof(struct v3d_gs_key));
1019 }
1020 
1021 static uint32_t
1022 vs_cache_hash(const void *key)
1023 {
1024         return cache_hash(key, sizeof(struct v3d_vs_key));
1025 }
1026 
1027 static uint32_t
1028 cs_cache_hash(const void *key)
1029 {
1030         return cache_hash(key, sizeof(struct v3d_key));
1031 }
1032 
1033 static bool
1034 fs_cache_compare(const void *key1, const void *key2)
1035 {
1036         return cache_compare(key1, key2, sizeof(struct v3d_fs_key));
1037 }
1038 
1039 static bool
1040 gs_cache_compare(const void *key1, const void *key2)
1041 {
1042         return cache_compare(key1, key2, sizeof(struct v3d_gs_key));
1043 }
1044 
1045 static bool
1046 vs_cache_compare(const void *key1, const void *key2)
1047 {
1048         return cache_compare(key1, key2, sizeof(struct v3d_vs_key));
1049 }
1050 
1051 static bool
1052 cs_cache_compare(const void *key1, const void *key2)
1053 {
1054         return cache_compare(key1, key2, sizeof(struct v3d_key));
1055 }
1056 
1057 static void
1058 v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
1059 {
1060         struct v3d_context *v3d = v3d_context(pctx);
1061         struct v3d_uncompiled_shader *so = hwcso;
1062         nir_shader *s = so->base.ir.nir;
1063 
1064         hash_table_foreach(v3d->prog.cache[s->info.stage], entry) {
1065                 const struct v3d_cache_key *cache_key = entry->key;
1066                 struct v3d_compiled_shader *shader = entry->data;
1067 
1068                 if (memcmp(cache_key->sha1, so->sha1, 20) != 0)
1069                         continue;
1070 
1071                 if (v3d->prog.fs == shader)
1072                         v3d->prog.fs = NULL;
1073                 if (v3d->prog.vs == shader)
1074                         v3d->prog.vs = NULL;
1075                 if (v3d->prog.cs == shader)
1076                         v3d->prog.cs = NULL;
1077                 if (v3d->prog.compute == shader)
1078                         v3d->prog.compute = NULL;
1079 
1080                 _mesa_hash_table_remove(v3d->prog.cache[s->info.stage], entry);
1081                 v3d_free_compiled_shader(shader);
1082         }
1083 
1084         ralloc_free(so->base.ir.nir);
1085         free(so);
1086 }
1087 
1088 static void
1089 v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
1090 {
1091         struct v3d_context *v3d = v3d_context(pctx);
1092         v3d->prog.bind_fs = hwcso;
1093         v3d->dirty |= V3D_DIRTY_UNCOMPILED_FS;
1094 }
1095 
1096 static void
1097 v3d_gp_state_bind(struct pipe_context *pctx, void *hwcso)
1098 {
1099         struct v3d_context *v3d = v3d_context(pctx);
1100         v3d->prog.bind_gs = hwcso;
1101         v3d->dirty |= V3D_DIRTY_UNCOMPILED_GS;
1102 }
1103 
1104 static void
1105 v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
1106 {
1107         struct v3d_context *v3d = v3d_context(pctx);
1108         v3d->prog.bind_vs = hwcso;
1109         v3d->dirty |= V3D_DIRTY_UNCOMPILED_VS;
1110 }
1111 
1112 static void
1113 v3d_compute_state_bind(struct pipe_context *pctx, void *state)
1114 {
1115         struct v3d_context *v3d = v3d_context(pctx);
1116 
1117         v3d->prog.bind_compute = state;
1118         v3d->dirty |= V3D_DIRTY_UNCOMPILED_CS;
1119 }
1120 
1121 static void *
1122 v3d_create_compute_state(struct pipe_context *pctx,
1123                          const struct pipe_compute_state *cso)
1124 {
1125         return v3d_uncompiled_shader_create(pctx, cso->ir_type,
1126                                             (void *)cso->prog);
1127 }
1128 
1129 static void
1130 v3d_get_compute_state_info(struct pipe_context *pctx,
1131                            void *cso,
1132                            struct pipe_compute_state_object_info *info)
1133 {
1134         struct v3d_context *v3d = v3d_context(pctx);
1135 
1136         /* this API requires compiled shaders */
1137         v3d_compute_state_bind(pctx, cso);
1138         v3d_update_compiled_cs(v3d);
1139 
1140         info->max_threads = V3D_CHANNELS * v3d->prog.compute->prog_data.base->threads;
1141         info->preferred_simd_size = V3D_CHANNELS;
1142         info->private_memory = 0;
1143 }
1144 
1145 void
1146 v3d_program_init(struct pipe_context *pctx)
1147 {
1148         struct v3d_context *v3d = v3d_context(pctx);
1149 
1150         pctx->create_vs_state = v3d_shader_state_create;
1151         pctx->delete_vs_state = v3d_shader_state_delete;
1152 
1153         pctx->create_gs_state = v3d_shader_state_create;
1154         pctx->delete_gs_state = v3d_shader_state_delete;
1155 
1156         pctx->create_fs_state = v3d_shader_state_create;
1157         pctx->delete_fs_state = v3d_shader_state_delete;
1158 
1159         pctx->bind_fs_state = v3d_fp_state_bind;
1160         pctx->bind_gs_state = v3d_gp_state_bind;
1161         pctx->bind_vs_state = v3d_vp_state_bind;
1162 
1163         if (v3d->screen->has_csd) {
1164                 pctx->create_compute_state = v3d_create_compute_state;
1165                 pctx->delete_compute_state = v3d_shader_state_delete;
1166                 pctx->bind_compute_state = v3d_compute_state_bind;
1167                 pctx->get_compute_state_info = v3d_get_compute_state_info;
1168         }
1169 
1170         v3d->prog.cache[MESA_SHADER_VERTEX] =
1171                 _mesa_hash_table_create(pctx, vs_cache_hash, vs_cache_compare);
1172         v3d->prog.cache[MESA_SHADER_GEOMETRY] =
1173                 _mesa_hash_table_create(pctx, gs_cache_hash, gs_cache_compare);
1174         v3d->prog.cache[MESA_SHADER_FRAGMENT] =
1175                 _mesa_hash_table_create(pctx, fs_cache_hash, fs_cache_compare);
1176         v3d->prog.cache[MESA_SHADER_COMPUTE] =
1177                 _mesa_hash_table_create(pctx, cs_cache_hash, cs_cache_compare);
1178 }
1179 
1180 void
1181 v3d_program_fini(struct pipe_context *pctx)
1182 {
1183         struct v3d_context *v3d = v3d_context(pctx);
1184 
1185         for (int i = 0; i < MESA_SHADER_STAGES; i++) {
1186                 struct hash_table *cache = v3d->prog.cache[i];
1187                 if (!cache)
1188                         continue;
1189 
1190                 hash_table_foreach(cache, entry) {
1191                         struct v3d_compiled_shader *shader = entry->data;
1192                         v3d_free_compiled_shader(shader);
1193                         _mesa_hash_table_remove(cache, entry);
1194                 }
1195         }
1196 
1197         v3d_bo_unreference(&v3d->prog.spill_bo);
1198 }
1199