/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_shader.h"

#include "etnaviv_compiler.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_disk_cache.h"
#include "etnaviv_screen.h"
#include "etnaviv_util.h"

#include "nir/tgsi_to_nir.h"
#include "util/u_atomic.h"
#include "util/u_cpu_detect.h"
#include "util/u_math.h"
#include "util/u_memory.h"

/* Upload shader code to bo, if not already done */
static bool etna_icache_upload_shader(struct etna_context *ctx, struct etna_shader_variant *v)
{
   if (v->bo)
      return true;
   v->bo = etna_bo_new(ctx->screen->dev, v->code_size*4, DRM_ETNA_GEM_CACHE_WC);
   if (!v->bo)
      return false;

   void *buf = etna_bo_map(v->bo);
   etna_bo_cpu_prep(v->bo, DRM_ETNA_PREP_WRITE);
   memcpy(buf, v->code, v->code_size*4);
   etna_bo_cpu_fini(v->bo);
   DBG("Uploaded %s of %u words to bo %p", v->stage == MESA_SHADER_FRAGMENT ? "fs":"vs", v->code_size, v->bo);
   return true;
}

void
etna_dump_shader(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
   for (int idx = 0; idx < shader->uniforms.count; ++idx) {
      printf(" [%i].%c = %f (0x%08x) (%d)\n",
             idx / 4,
             "xyzw"[idx % 4],
             *((float *)&shader->uniforms.data[idx]),
             shader->uniforms.data[idx],
             shader->uniforms.contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
               (shader->stage == MESA_SHADER_VERTEX) ?
               gl_vert_attrib_name(shader->infile.reg[idx].slot) :
               gl_varying_slot_name_for_stage(shader->infile.reg[idx].slot, shader->stage),
               shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
               (shader->stage == MESA_SHADER_VERTEX) ?
               gl_varying_slot_name_for_stage(shader->outfile.reg[idx].slot, shader->stage) :
               gl_frag_result_name(shader->outfile.reg[idx].slot),
               shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf("  vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf("  vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf("  vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      for (int idx = 0; idx < ARRAY_SIZE(shader->ps_color_out_reg); idx++)
         printf("  ps_color_out_reg[%u]=%i\n", idx, shader->ps_color_out_reg[idx]);

      printf("  ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf("  input_count_unk8=0x%08x\n", shader->input_count_unk8);
}

/* Link vs and fs together: fill in shader_state from vs and fs.
 * As this function is called every time a new fs or vs is bound, the goal is
 * to do as little processing as possible here, and to precompute as much as
 * possible in the vs/fs shader_object.
 *
 * XXX we could cache the link result for a certain set of VS/PS; usually a
 * pair of VS and PS will be used together anyway.
 */
static bool
etna_link_shaders(struct etna_context *ctx, struct compiled_shader_state *cs,
                  struct etna_shader_variant *vs, struct etna_shader_variant *fs)
{
   struct etna_shader_link_info link = { };

   assert(vs->stage == MESA_SHADER_VERTEX);
   assert(fs->stage == MESA_SHADER_FRAGMENT);

   etna_link_shader(&link, vs, fs);

   if (DBG_ENABLED(ETNA_DBG_LINKER_MSGS)) {
      debug_printf("link result:\n");
      debug_printf("  vs  -> fs  comps use     pa_attr\n");

      for (int idx = 0; idx < link.num_varyings; ++idx)
         debug_printf("  t%-2u -> t%-2u %-5.*s %u,%u,%u,%u 0x%08x\n",
                      link.varyings[idx].reg, idx + 1,
                      link.varyings[idx].num_components, "xyzw",
                      link.varyings[idx].use[0], link.varyings[idx].use[1],
                      link.varyings[idx].use[2], link.varyings[idx].use[3],
                      link.varyings[idx].pa_attributes);
   }

   /* set last_varying_2x flag if the last varying has 1 or 2 components */
   bool last_varying_2x = false;
   if (link.num_varyings > 0 && link.varyings[link.num_varyings - 1].num_components <= 2)
      last_varying_2x = true;

   cs->RA_CONTROL = VIVS_RA_CONTROL_UNK0 |
                    COND(last_varying_2x, VIVS_RA_CONTROL_LAST_VARYING_2X);

   cs->PA_ATTRIBUTE_ELEMENT_COUNT = VIVS_PA_ATTRIBUTE_ELEMENT_COUNT_COUNT(link.num_varyings);
   STATIC_ASSERT(VIVS_PA_SHADER_ATTRIBUTES__LEN >= ETNA_NUM_VARYINGS);
   for (int idx = 0; idx < link.num_varyings; ++idx)
      cs->PA_SHADER_ATTRIBUTES[idx] = link.varyings[idx].pa_attributes;
   cs->pa_shader_attributes_states = link.num_varyings;

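   /* Note: code_size is counted in 32-bit words (see the *4 byte size in
    * etna_icache_upload_shader); an instruction is four words, so
    * code_size / 4 is the instruction count used as the end PC. */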
   cs->VS_END_PC = vs->code_size / 4;
   cs->VS_OUTPUT_COUNT = 1 + link.num_varyings; /* position + varyings */

   /* vs outputs (varyings) */
   DEFINE_ETNA_BITARRAY(vs_output, ARRAY_SIZE(cs->VS_OUTPUT) * 4, 8) = {0};
   int varid = 0;
   etna_bitarray_set(vs_output, 8, varid++, vs->vs_pos_out_reg);
   for (int idx = 0; idx < link.num_varyings; ++idx)
      etna_bitarray_set(vs_output, 8, varid++, link.varyings[idx].reg);
   if (vs->vs_pointsize_out_reg >= 0)
      etna_bitarray_set(vs_output, 8, varid++, vs->vs_pointsize_out_reg); /* pointsize is last */

   for (int idx = 0; idx < ARRAY_SIZE(cs->VS_OUTPUT); ++idx)
      cs->VS_OUTPUT[idx] = vs_output[idx];

   if (vs->vs_pointsize_out_reg != -1) {
      /* vertex shader outputs point coordinate, provide extra output and
       * make sure PA config is not masked */
      cs->PA_CONFIG = ~0;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT + 1;
   } else {
      /* vertex shader does not output point coordinate, make sure that
       * POINT_SIZE_ENABLE is masked and no extra output is given */
      cs->PA_CONFIG = ~VIVS_PA_CONFIG_POINT_SIZE_ENABLE;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT;
   }

   /* if fragment shader doesn't read pointcoord, disable it */
   if (link.pcoord_varying_comp_ofs == -1)
      cs->PA_CONFIG &= ~VIVS_PA_CONFIG_POINT_SPRITE_ENABLE;

   cs->VS_LOAD_BALANCING = vs->vs_load_balancing;
   cs->VS_START_PC = 0;

   cs->PS_END_PC = fs->code_size / 4;

   /* apply output remapping based on current framebuffer state */
   int ps_color_out_reg[PIPE_MAX_COLOR_BUFS];

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->framebuffer.ps_output_remap); i++)
      ps_color_out_reg[i] = fs->ps_color_out_reg[ctx->framebuffer.ps_output_remap[i]];

   cs->PS_OUTPUT_REG[0] =
      VIVS_PS_OUTPUT_REG_0(ps_color_out_reg[0]) |
      VIVS_PS_OUTPUT_REG_1(ps_color_out_reg[1]) |
      VIVS_PS_OUTPUT_REG_2(ps_color_out_reg[2]) |
      VIVS_PS_OUTPUT_REG_3(ps_color_out_reg[3]);

   cs->PS_OUTPUT_REG[1] =
      VIVS_PS_OUTPUT_REG2_4(ps_color_out_reg[4]) |
      VIVS_PS_OUTPUT_REG2_5(ps_color_out_reg[5]) |
      VIVS_PS_OUTPUT_REG2_6(ps_color_out_reg[6]) |
      VIVS_PS_OUTPUT_REG2_7(ps_color_out_reg[7]);

   /* apply saturation information from current framebuffer state */
   cs->PS_OUTPUT_REG[1] |= ctx->framebuffer.PS_OUTPUT_REG2;

   cs->PS_INPUT_COUNT =
      VIVS_PS_INPUT_COUNT_COUNT(link.num_varyings + 1) | /* Number of inputs plus position */
      VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
   cs->PS_TEMP_REGISTER_CONTROL =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps, link.num_varyings + 1));
   cs->PS_START_PC = 0;

   /* Precompute PS_INPUT_COUNT and TEMP_REGISTER_CONTROL for the MSAA case;
    * this avoids some fumbling in sync_context. */
   /* MSAA adds another input */
   cs->PS_INPUT_COUNT_MSAA =
      VIVS_PS_INPUT_COUNT_COUNT(link.num_varyings + 2) |
      VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
   /* MSAA adds another temp */
   cs->PS_TEMP_REGISTER_CONTROL_MSAA =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps + 1, link.num_varyings + 2));

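   /* Pack per-varying component counts plus per-component use/semantic bits
    * into the varying state words; on HALTI5+ the per-component semantic is
    * recorded instead of the component-use field. */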
   uint32_t total_components = 0;
   DEFINE_ETNA_BITARRAY(num_components, ETNA_NUM_VARYINGS, 4) = {0};
   DEFINE_ETNA_BITARRAY(component_use, 4 * ETNA_NUM_VARYINGS, 2) = {0};
   DEFINE_ETNA_BITARRAY(halti5_varying_semantic, 4 * 32, 4) = {0};
   for (int idx = 0; idx < link.num_varyings; ++idx) {
      const struct etna_varying *varying = &link.varyings[idx];

      etna_bitarray_set(num_components, 4, idx, varying->num_components);
      for (int comp = 0; comp < varying->num_components; ++comp) {
         if (ctx->screen->info->halti >= 5)
            etna_bitarray_set(halti5_varying_semantic, 4, total_components, varying->semantic);
         else
            etna_bitarray_set(component_use, 2, total_components, varying->use[comp]);
         total_components += 1;
      }
   }

   cs->GL_VARYING_TOTAL_COMPONENTS =
      VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(align(total_components, 2));
   memcpy(cs->GL_VARYING_NUM_COMPONENTS, num_components, sizeof(uint32_t) * 2);
   memcpy(cs->GL_VARYING_COMPONENT_USE, component_use, sizeof(uint32_t) * 4);
   memcpy(cs->GL_HALTI5_SHADER_ATTRIBUTES, halti5_varying_semantic,
          sizeof(uint32_t) * VIVS_GL_HALTI5_SHADER_ATTRIBUTES__LEN);
   cs->halti5_shader_attributes_states = DIV_ROUND_UP(total_components, 8);

   cs->GL_HALTI5_SH_SPECIALS =
      0x7f7f0000 | /* unknown bits, probably other PS inputs */
      /* pointsize is last (see above) */
      VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT((vs->vs_pointsize_out_reg != -1) ?
                                              cs->VS_OUTPUT_COUNT * 4 : 0x00) |
      VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN((link.pcoord_varying_comp_ofs != -1) ?
                                              link.pcoord_varying_comp_ofs : 0x7f);

   cs->writes_z = fs->ps_depth_out_reg >= 0;
   cs->uses_discard = fs->uses_discard;

   /* reference instruction memory */
   cs->vs_inst_mem_size = vs->code_size;
   cs->VS_INST_MEM = vs->code;

   cs->ps_inst_mem_size = fs->code_size;
   cs->PS_INST_MEM = fs->code;

   if (vs->needs_icache || fs->needs_icache) {
      /* If either of the shaders needs ICACHE, we use it for both. It is
       * either switched on or off for the entire shader processor.
       */
      if (!etna_icache_upload_shader(ctx, vs) ||
          !etna_icache_upload_shader(ctx, fs)) {
         assert(0);
         return false;
      }

      cs->VS_INST_ADDR.bo = vs->bo;
      cs->VS_INST_ADDR.offset = 0;
      cs->VS_INST_ADDR.flags = ETNA_RELOC_READ;
      cs->PS_INST_ADDR.bo = fs->bo;
      cs->PS_INST_ADDR.offset = 0;
      cs->PS_INST_ADDR.flags = ETNA_RELOC_READ;
   } else {
      /* clear relocs */
      memset(&cs->VS_INST_ADDR, 0, sizeof(cs->VS_INST_ADDR));
      memset(&cs->PS_INST_ADDR, 0, sizeof(cs->PS_INST_ADDR));
   }

   return true;
}

bool
etna_shader_link(struct etna_context *ctx)
{
   if (!ctx->shader.vs || !ctx->shader.fs)
      return false;

   /* re-link vs and fs if needed */
   return etna_link_shaders(ctx, &ctx->shader_state, ctx->shader.vs, ctx->shader.fs);
}

void
etna_destroy_shader(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.data);
   FREE(shader->uniforms.contents);
   FREE(shader);
}

static bool
etna_shader_update_vs_inputs(struct compiled_shader_state *cs,
                             const struct etna_shader_variant *vs,
                             const struct compiled_vertex_elements_state *ves)
{
   unsigned num_temps, cur_temp, num_vs_inputs;

   if (!vs)
      return false;

   /* Number of vertex elements determines number of VS inputs. Otherwise,
    * the GPU crashes. Allocate any unused vertex elements to VS temporary
    * registers. */
   num_vs_inputs = MAX2(ves->num_elements, vs->infile.num_reg);
   if (num_vs_inputs != ves->num_elements) {
      BUG("Number of elements %u does not match the number of VS inputs %zu",
          ves->num_elements, vs->infile.num_reg);
      return false;
   }

   cur_temp = vs->num_temps;
   num_temps = num_vs_inputs - vs->infile.num_reg + cur_temp;

   cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs) |
                        VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8);
   cs->VS_TEMP_REGISTER_CONTROL =
      VIVS_VS_TEMP_REGISTER_CONTROL_NUM_TEMPS(num_temps);

   /* vs inputs (attributes) */
   DEFINE_ETNA_BITARRAY(vs_input, 16, 8) = {0};
   for (int idx = 0; idx < num_vs_inputs; ++idx) {
      if (idx < vs->infile.num_reg)
         etna_bitarray_set(vs_input, 8, idx, vs->infile.reg[idx].reg);
      else
         etna_bitarray_set(vs_input, 8, idx, cur_temp++);
   }

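   /* If the VS reads the vertex/instance ID, enable the extra ID input and
    * route it to the register slot after the last regular VS input. */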
   if (vs->vs_id_in_reg >= 0) {
      cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs + 1) |
                           VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8) |
                           VIVS_VS_INPUT_COUNT_ID_ENABLE;

      etna_bitarray_set(vs_input, 8, num_vs_inputs, vs->vs_id_in_reg);

      cs->FE_HALTI5_ID_CONFIG =
         VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_ENABLE |
         VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_ENABLE |
         VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_REG(vs->vs_id_in_reg * 4) |
         VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_REG(vs->vs_id_in_reg * 4 + 1);
   }

   for (int idx = 0; idx < ARRAY_SIZE(cs->VS_INPUT); ++idx)
      cs->VS_INPUT[idx] = vs_input[idx];

   return true;
}

static inline const char *
etna_shader_stage(struct etna_shader *shader)
{
   switch (shader->nir->info.stage) {
   case MESA_SHADER_VERTEX:     return "VERT";
   case MESA_SHADER_FRAGMENT:   return "FRAG";
   case MESA_SHADER_COMPUTE:    return "CL";
   default:
      unreachable("invalid type");
      return NULL;
   }
}

static void
dump_shader_info(struct etna_shader_variant *v, struct util_debug_callback *debug)
{
   if (!DBG_ENABLED(ETNA_DBG_SHADERDB))
      return;

   util_debug_message(debug, SHADER_INFO,
         "%s shader: %u instructions, %u temps, "
         "%u immediates, %u loops",
         etna_shader_stage(v->shader),
         v->code_size / 4,
         v->num_temps,
         v->uniforms.count,
         v->num_loops);
}

bool
etna_shader_update_vertex(struct etna_context *ctx)
{
   return etna_shader_update_vs_inputs(&ctx->shader_state, ctx->shader.vs,
                                       ctx->vertex_elements);
}

static struct etna_shader_variant *
create_variant(struct etna_shader *shader,
               const struct etna_shader_key* const key)
{
   struct etna_shader_variant *v = CALLOC_STRUCT(etna_shader_variant);
   int ret;

   if (!v)
      return NULL;

   v->shader = shader;
   v->key = *key;
   v->id = ++shader->variant_count;

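   /* Try the disk cache first; only compile from scratch on a miss. */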
   if (etna_disk_cache_retrieve(shader->compiler, v))
      return v;

   ret = etna_compile_shader(v);
   if (!ret) {
      debug_error("compile failed!");
      goto fail;
   }

   etna_disk_cache_store(shader->compiler, v);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      etna_dump_shader(v);

   return v;

fail:
   FREE(v);
   return NULL;
}

struct etna_shader_variant *
etna_shader_variant(struct etna_shader *shader,
                    const struct etna_shader_key* const key,
                    struct util_debug_callback *debug,
                    bool called_from_draw)
{
   struct etna_shader_variant *v;

   assert(shader->specs->fragment_sampler_count <= ARRAY_SIZE(key->tex_swizzle));

   for (v = shader->variants; v; v = v->next)
      if (etna_shader_key_equal(key, &v->key))
         return v;

   /* compile new variant if it doesn't exist already */
   v = create_variant(shader, key);
   if (v) {
      v->next = shader->variants;
      shader->variants = v;
      dump_shader_info(v, debug);
   }

   if (called_from_draw) {
      perf_debug_message(debug, SHADER_INFO,
                         "%s shader: recompiling at draw time: global "
                         "0x%08x\n",
                         etna_shader_stage(shader), key->global);
   }

   return v;
}

/**
 * Should initial variants be compiled synchronously?
 *
 * The only case where pipe_debug_message() is used in the initial-variants
 * path is with ETNA_MESA_DEBUG=shaderdb. So if either debug is disabled (i.e.
 * debug.debug_message==NULL), or shaderdb stats are not enabled, we can
 * compile the initial shader variant asynchronously.
 */
static inline bool
initial_variants_synchronous(struct etna_context *ctx)
{
   return unlikely(ctx->base.debug.debug_message) ||
                   DBG_ENABLED(ETNA_DBG_SHADERDB) ||
                   DBG_ENABLED(ETNA_DBG_DUMP_SHADERS);
}

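/* Queue job: compile the initial variant for the default (zeroed) key on a
 * compiler-queue thread, without a debug callback. */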
static void
create_initial_variants_async(void *job, void *gdata, int thread_index)
{
   struct etna_shader *shader = job;
   struct util_debug_callback debug = {};
   static struct etna_shader_key key;

   etna_shader_variant(shader, &key, &debug, false);
}

static void *
etna_create_shader_state(struct pipe_context *pctx,
                         const struct pipe_shader_state *pss)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct etna_compiler *compiler = screen->compiler;
   struct etna_shader *shader = CALLOC_STRUCT(etna_shader);

   if (!shader)
      return NULL;

   shader->id = p_atomic_inc_return(&compiler->shader_count);
   shader->info = screen->info;
   shader->specs = &screen->specs;
   shader->compiler = screen->compiler;
   util_queue_fence_init(&shader->ready);

   shader->nir = (pss->type == PIPE_SHADER_IR_NIR) ? pss->ir.nir :
                  tgsi_to_nir(pss->tokens, pctx->screen, false);

   etna_disk_cache_init_shader_key(compiler, shader);

   if (initial_variants_synchronous(ctx)) {
      struct etna_shader_key key = {};
      etna_shader_variant(shader, &key, &ctx->base.debug, false);
   } else {
      struct etna_screen *screen = ctx->screen;
      util_queue_add_job(&screen->shader_compiler_queue, shader, &shader->ready,
                         create_initial_variants_async, NULL, 0);
   }

   return shader;
}


static void
etna_delete_shader_state(struct pipe_context *pctx, void *ss)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct etna_shader *shader = ss;
   struct etna_shader_variant *v, *t;

   util_queue_drop_job(&screen->shader_compiler_queue, &shader->ready);

   v = shader->variants;
   while (v) {
      t = v;
      v = v->next;
      if (t->bo)
         etna_bo_del(t->bo);

      etna_destroy_shader(t);
   }

   ralloc_free(shader->nir);
   util_queue_fence_destroy(&shader->ready);
   FREE(shader);
}

static void
etna_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
   struct etna_context *ctx = etna_context(pctx);

   ctx->shader.bind_fs = hwcso;
   ctx->dirty |= ETNA_DIRTY_SHADER;
}

static void
etna_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
   struct etna_context *ctx = etna_context(pctx);

   ctx->shader.bind_vs = hwcso;
   ctx->dirty |= ETNA_DIRTY_SHADER;
}

static void
etna_set_max_shader_compiler_threads(struct pipe_screen *pscreen,
                                     unsigned max_threads)
{
   struct etna_screen *screen = etna_screen(pscreen);

   util_queue_adjust_num_threads(&screen->shader_compiler_queue, max_threads,
                                 false);
}

static bool
etna_is_parallel_shader_compilation_finished(struct pipe_screen *pscreen,
                                             void *hwcso,
                                             enum pipe_shader_type shader_type)
{
   struct etna_shader *shader = (struct etna_shader *)hwcso;

   return util_queue_fence_is_signalled(&shader->ready);
}

void
etna_shader_init(struct pipe_context *pctx)
{
   pctx->create_fs_state = etna_create_shader_state;
   pctx->bind_fs_state = etna_bind_fs_state;
   pctx->delete_fs_state = etna_delete_shader_state;
   pctx->create_vs_state = etna_create_shader_state;
   pctx->bind_vs_state = etna_bind_vs_state;
   pctx->delete_vs_state = etna_delete_shader_state;
}

bool
etna_shader_screen_init(struct pipe_screen *pscreen)
{
   struct etna_screen *screen = etna_screen(pscreen);
   unsigned num_threads = util_get_cpu_caps()->nr_cpus - 1;

   /* Create at least one thread - even on single core CPU systems. */
   num_threads = MAX2(1, num_threads);

   screen->compiler = etna_compiler_create(pscreen->get_name(pscreen), screen->info);
   if (!screen->compiler)
      return false;

   pscreen->set_max_shader_compiler_threads = etna_set_max_shader_compiler_threads;
   pscreen->is_parallel_shader_compilation_finished = etna_is_parallel_shader_compilation_finished;

   return util_queue_init(&screen->shader_compiler_queue, "sh", 64, num_threads,
                          UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY,
                          NULL);
}

void
etna_shader_screen_fini(struct pipe_screen *pscreen)
{
   struct etna_screen *screen = etna_screen(pscreen);

   util_queue_destroy(&screen->shader_compiler_queue);
   etna_compiler_destroy(screen->compiler);
}
646