/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_shader.h"

#include "etnaviv_compiler.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_disk_cache.h"
#include "etnaviv_screen.h"
#include "etnaviv_util.h"

#include "tgsi/tgsi_parse.h"
#include "nir/tgsi_to_nir.h"
#include "util/u_atomic.h"
#include "util/u_cpu_detect.h"
#include "util/u_math.h"
#include "util/u_memory.h"

/* Upload shader code to bo, if not already done */
static bool etna_icache_upload_shader(struct etna_context *ctx, struct etna_shader_variant *v)
{
   if (v->bo)
      return true;
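   /* code_size is counted in 32-bit instruction words, hence the *4 when
    * sizing the bo and copying the code below. */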
   v->bo = etna_bo_new(ctx->screen->dev, v->code_size*4, DRM_ETNA_GEM_CACHE_WC);
   if (!v->bo)
      return false;

   void *buf = etna_bo_map(v->bo);
   etna_bo_cpu_prep(v->bo, DRM_ETNA_PREP_WRITE);
   memcpy(buf, v->code, v->code_size*4);
   etna_bo_cpu_fini(v->bo);
   DBG("Uploaded %s of %u words to bo %p", v->stage == MESA_SHADER_FRAGMENT ? "fs":"vs", v->code_size, v->bo);
   return true;
}

extern const char *tgsi_swizzle_names[];
void
etna_dump_shader(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
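   /* uniforms are stored one scalar component per entry: idx / 4 is the vec4
    * register index and idx % 4 selects the x/y/z/w component */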
   for (int idx = 0; idx < shader->uniforms.count; ++idx) {
      printf(" [%i].%s = %f (0x%08x) (%d)\n",
             idx / 4,
             tgsi_swizzle_names[idx % 4],
             *((float *)&shader->uniforms.data[idx]),
             shader->uniforms.data[idx],
             shader->uniforms.contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_vert_attrib_name(shader->infile.reg[idx].slot) :
              gl_varying_slot_name_for_stage(shader->infile.reg[idx].slot, shader->stage),
             shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_varying_slot_name_for_stage(shader->outfile.reg[idx].slot, shader->stage) :
              gl_frag_result_name(shader->outfile.reg[idx].slot),
             shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf(" vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf(" vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf(" vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      printf(" ps_color_out_reg=%i\n", shader->ps_color_out_reg);
      printf(" ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf(" input_count_unk8=0x%08x\n", shader->input_count_unk8);
}

/* Link vs and fs together: fill in shader_state from vs and fs.
 * As this function is called every time a new fs or vs is bound, the goal is
 * to do as little processing as possible here and to precompute as much as
 * possible in the vs/fs shader_object.
 *
 * XXX we could cache the link result for a certain set of VS/PS; usually a
 * pair of VS and PS will be used together anyway.
 */
static bool
etna_link_shaders(struct etna_context *ctx, struct compiled_shader_state *cs,
                  struct etna_shader_variant *vs, struct etna_shader_variant *fs)
{
   struct etna_shader_link_info link = { };
   bool failed;

   assert(vs->stage == MESA_SHADER_VERTEX);
   assert(fs->stage == MESA_SHADER_FRAGMENT);

#ifdef DEBUG
   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS)) {
      etna_dump_shader(vs);
      etna_dump_shader(fs);
   }
#endif

   failed = etna_link_shader(&link, vs, fs);

   if (failed) {
      /* linking failed: some fs inputs do not have corresponding
       * vs outputs */
      assert(0);

      return false;
   }

   if (DBG_ENABLED(ETNA_DBG_LINKER_MSGS)) {
      debug_printf("link result:\n");
      debug_printf(" vs -> fs comps use pa_attr\n");

      for (int idx = 0; idx < link.num_varyings; ++idx)
         debug_printf(" t%-2u -> t%-2u %-5.*s %u,%u,%u,%u 0x%08x\n",
                      link.varyings[idx].reg, idx + 1,
                      link.varyings[idx].num_components, "xyzw",
                      link.varyings[idx].use[0], link.varyings[idx].use[1],
                      link.varyings[idx].use[2], link.varyings[idx].use[3],
                      link.varyings[idx].pa_attributes);
   }

   /* set last_varying_2x flag if the last varying has 1 or 2 components */
   bool last_varying_2x = false;
   if (link.num_varyings > 0 && link.varyings[link.num_varyings - 1].num_components <= 2)
      last_varying_2x = true;

   cs->RA_CONTROL = VIVS_RA_CONTROL_UNK0 |
                    COND(last_varying_2x, VIVS_RA_CONTROL_LAST_VARYING_2X);

   cs->PA_ATTRIBUTE_ELEMENT_COUNT = VIVS_PA_ATTRIBUTE_ELEMENT_COUNT_COUNT(link.num_varyings);
   for (int idx = 0; idx < link.num_varyings; ++idx)
      cs->PA_SHADER_ATTRIBUTES[idx] = link.varyings[idx].pa_attributes;

   cs->VS_END_PC = vs->code_size / 4;
   cs->VS_OUTPUT_COUNT = 1 + link.num_varyings; /* position + varyings */

   /* vs outputs (varyings) */
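   /* Each entry is an 8-bit output register index packed into the VS_OUTPUT
    * words: position goes first, then the linked varyings, and the point size
    * output last (if present). */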
   DEFINE_ETNA_BITARRAY(vs_output, 16, 8) = {0};
   int varid = 0;
   etna_bitarray_set(vs_output, 8, varid++, vs->vs_pos_out_reg);
   for (int idx = 0; idx < link.num_varyings; ++idx)
      etna_bitarray_set(vs_output, 8, varid++, link.varyings[idx].reg);
   if (vs->vs_pointsize_out_reg >= 0)
      etna_bitarray_set(vs_output, 8, varid++, vs->vs_pointsize_out_reg); /* pointsize is last */

   for (int idx = 0; idx < ARRAY_SIZE(cs->VS_OUTPUT); ++idx)
      cs->VS_OUTPUT[idx] = vs_output[idx];

   if (vs->vs_pointsize_out_reg != -1) {
      /* vertex shader outputs a point size: provide the extra output and make
       * sure PA config is not masked */
      cs->PA_CONFIG = ~0;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT + 1;
   } else {
      /* vertex shader does not output a point size: make sure that
       * POINT_SIZE_ENABLE is masked and no extra output is given */
      cs->PA_CONFIG = ~VIVS_PA_CONFIG_POINT_SIZE_ENABLE;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT;
   }

   /* if fragment shader doesn't read pointcoord, disable it */
   if (link.pcoord_varying_comp_ofs == -1)
      cs->PA_CONFIG &= ~VIVS_PA_CONFIG_POINT_SPRITE_ENABLE;

   cs->VS_LOAD_BALANCING = vs->vs_load_balancing;
   cs->VS_START_PC = 0;

   cs->PS_END_PC = fs->code_size / 4;
   cs->PS_OUTPUT_REG = fs->ps_color_out_reg;
   cs->PS_INPUT_COUNT =
      VIVS_PS_INPUT_COUNT_COUNT(link.num_varyings + 1) | /* Number of inputs plus position */
      VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
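   /* NUM_TEMPS has to cover at least the PS inputs (varyings plus position),
    * presumably because inputs are passed in temporary registers -- hence the
    * MAX2 with num_varyings + 1 below. */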
   cs->PS_TEMP_REGISTER_CONTROL =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps, link.num_varyings + 1));
   cs->PS_START_PC = 0;

   /* Precompute PS_INPUT_COUNT and TEMP_REGISTER_CONTROL in the case of MSAA
    * mode, avoids some fumbling in sync_context. */
   cs->PS_INPUT_COUNT_MSAA =
      VIVS_PS_INPUT_COUNT_COUNT(link.num_varyings + 2) | /* MSAA adds another input */
      VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
   cs->PS_TEMP_REGISTER_CONTROL_MSAA =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps, link.num_varyings + 2));

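   /* Pack the per-varying component count (4 bits each) and the per-component
    * usage flags (2 bits each) into the GL_VARYING_* state words. */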
   uint32_t total_components = 0;
   DEFINE_ETNA_BITARRAY(num_components, ETNA_NUM_VARYINGS, 4) = {0};
   DEFINE_ETNA_BITARRAY(component_use, 4 * ETNA_NUM_VARYINGS, 2) = {0};
   for (int idx = 0; idx < link.num_varyings; ++idx) {
      const struct etna_varying *varying = &link.varyings[idx];

      etna_bitarray_set(num_components, 4, idx, varying->num_components);
      for (int comp = 0; comp < varying->num_components; ++comp) {
         etna_bitarray_set(component_use, 2, total_components, varying->use[comp]);
         total_components += 1;
      }
   }

   cs->GL_VARYING_TOTAL_COMPONENTS =
      VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(align(total_components, 2));
   cs->GL_VARYING_NUM_COMPONENTS[0] = num_components[0];
   cs->GL_VARYING_NUM_COMPONENTS[1] = num_components[1];
   cs->GL_VARYING_COMPONENT_USE[0] = component_use[0];
   cs->GL_VARYING_COMPONENT_USE[1] = component_use[1];

   cs->GL_HALTI5_SH_SPECIALS =
      0x7f7f0000 | /* unknown bits, probably other PS inputs */
      /* pointsize is last (see above) */
      VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT((vs->vs_pointsize_out_reg != -1) ?
                                              cs->VS_OUTPUT_COUNT * 4 : 0x00) |
      VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN((link.pcoord_varying_comp_ofs != -1) ?
                                              link.pcoord_varying_comp_ofs : 0x7f);

   cs->writes_z = fs->ps_depth_out_reg >= 0;
   cs->uses_discard = fs->uses_discard;

   /* reference instruction memory */
   cs->vs_inst_mem_size = vs->code_size;
   cs->VS_INST_MEM = vs->code;

   cs->ps_inst_mem_size = fs->code_size;
   cs->PS_INST_MEM = fs->code;

   if (vs->needs_icache || fs->needs_icache) {
      /* If either of the shaders needs ICACHE, we use it for both. It is
       * either switched on or off for the entire shader processor.
       */
      if (!etna_icache_upload_shader(ctx, vs) ||
          !etna_icache_upload_shader(ctx, fs)) {
         assert(0);
         return false;
      }

      cs->VS_INST_ADDR.bo = vs->bo;
      cs->VS_INST_ADDR.offset = 0;
      cs->VS_INST_ADDR.flags = ETNA_RELOC_READ;
      cs->PS_INST_ADDR.bo = fs->bo;
      cs->PS_INST_ADDR.offset = 0;
      cs->PS_INST_ADDR.flags = ETNA_RELOC_READ;
   } else {
      /* clear relocs */
      memset(&cs->VS_INST_ADDR, 0, sizeof(cs->VS_INST_ADDR));
      memset(&cs->PS_INST_ADDR, 0, sizeof(cs->PS_INST_ADDR));
   }

   return true;
}

bool
etna_shader_link(struct etna_context *ctx)
{
   if (!ctx->shader.vs || !ctx->shader.fs)
      return false;

   /* re-link vs and fs if needed */
   return etna_link_shaders(ctx, &ctx->shader_state, ctx->shader.vs, ctx->shader.fs);
}

void
etna_destroy_shader(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.data);
   FREE(shader->uniforms.contents);
   FREE(shader);
}

static bool
etna_shader_update_vs_inputs(struct compiled_shader_state *cs,
                             const struct etna_shader_variant *vs,
                             const struct compiled_vertex_elements_state *ves)
{
   unsigned num_temps, cur_temp, num_vs_inputs;

   if (!vs)
      return false;

   /* Number of vertex elements determines number of VS inputs. Otherwise,
    * the GPU crashes. Allocate any unused vertex elements to VS temporary
    * registers. */
   num_vs_inputs = MAX2(ves->num_elements, vs->infile.num_reg);
   if (num_vs_inputs != ves->num_elements) {
      BUG("Number of elements %u does not match the number of VS inputs %zu",
          ves->num_elements, vs->infile.num_reg);
      return false;
   }

   cur_temp = vs->num_temps;
   num_temps = num_vs_inputs - vs->infile.num_reg + cur_temp;

   cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs) |
                        VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8);
   cs->VS_TEMP_REGISTER_CONTROL =
      VIVS_VS_TEMP_REGISTER_CONTROL_NUM_TEMPS(num_temps);

   /* vs inputs (attributes) */
   DEFINE_ETNA_BITARRAY(vs_input, 16, 8) = {0};
   for (int idx = 0; idx < num_vs_inputs; ++idx) {
      if (idx < vs->infile.num_reg)
         etna_bitarray_set(vs_input, 8, idx, vs->infile.reg[idx].reg);
      else
         etna_bitarray_set(vs_input, 8, idx, cur_temp++);
   }

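   /* When the VS reads gl_VertexID/gl_InstanceID (vs_id_in_reg >= 0), append
    * one extra input slot and point the front end at that register; the *4
    * and +1 below suggest the vertex ID lands in its .x component and the
    * instance ID in its .y component. */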
   if (vs->vs_id_in_reg >= 0) {
      cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs + 1) |
                           VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8) |
                           VIVS_VS_INPUT_COUNT_ID_ENABLE;

      etna_bitarray_set(vs_input, 8, num_vs_inputs, vs->vs_id_in_reg);

      cs->FE_HALTI5_ID_CONFIG =
         VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_ENABLE |
         VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_ENABLE |
         VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_REG(vs->vs_id_in_reg * 4) |
         VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_REG(vs->vs_id_in_reg * 4 + 1);
   }

   for (int idx = 0; idx < ARRAY_SIZE(cs->VS_INPUT); ++idx)
      cs->VS_INPUT[idx] = vs_input[idx];

   return true;
}

static inline const char *
etna_shader_stage(struct etna_shader_variant *shader)
{
   switch (shader->stage) {
   case MESA_SHADER_VERTEX:   return "VERT";
   case MESA_SHADER_FRAGMENT: return "FRAG";
   case MESA_SHADER_COMPUTE:  return "CL";
   default:
      unreachable("invalid type");
      return NULL;
   }
}

static void
dump_shader_info(struct etna_shader_variant *v, struct util_debug_callback *debug)
{
   if (!unlikely(etna_mesa_debug & ETNA_DBG_SHADERDB))
      return;

   util_debug_message(debug, SHADER_INFO,
                      "%s shader: %u instructions, %u temps, "
                      "%u immediates, %u loops",
                      etna_shader_stage(v),
                      v->code_size,
                      v->num_temps,
                      v->uniforms.count,
                      v->num_loops);
}

bool
etna_shader_update_vertex(struct etna_context *ctx)
{
   return etna_shader_update_vs_inputs(&ctx->shader_state, ctx->shader.vs,
                                       ctx->vertex_elements);
}

static struct etna_shader_variant *
create_variant(struct etna_shader *shader, struct etna_shader_key key)
{
   struct etna_shader_variant *v = CALLOC_STRUCT(etna_shader_variant);
   int ret;

   if (!v)
      return NULL;

   v->shader = shader;
   v->key = key;
   v->id = ++shader->variant_count;

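   /* try the on-disk shader cache first and only compile the variant from
    * scratch on a cache miss */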
   if (etna_disk_cache_retrieve(shader->compiler, v))
      return v;

   ret = etna_compile_shader(v);
   if (!ret) {
      debug_error("compile failed!");
      goto fail;
   }

   etna_disk_cache_store(shader->compiler, v);

   return v;

fail:
   FREE(v);
   return NULL;
}

struct etna_shader_variant *
etna_shader_variant(struct etna_shader *shader, struct etna_shader_key key,
                    struct util_debug_callback *debug)
{
   struct etna_shader_variant *v;

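   /* look for an existing variant with a matching key; variants are kept in a
    * singly linked list hanging off the shader */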
   for (v = shader->variants; v; v = v->next)
      if (etna_shader_key_equal(&key, &v->key))
         return v;

   /* compile new variant if it doesn't exist already */
   v = create_variant(shader, key);
   if (v) {
      v->next = shader->variants;
      shader->variants = v;
      dump_shader_info(v, debug);
   }

   return v;
}

/**
 * Should initial variants be compiled synchronously?
 *
 * The only case where pipe_debug_message() is used in the initial-variants
 * path is with ETNA_MESA_DEBUG=shaderdb. So if either debug is disabled (ie.
 * debug.debug_message==NULL), or shaderdb stats are not enabled, we can
 * compile the initial shader variant asynchronously.
 */
static inline bool
initial_variants_synchronous(struct etna_context *ctx)
{
   return unlikely(ctx->debug.debug_message) || (etna_mesa_debug & ETNA_DBG_SHADERDB);
}

static void
create_initial_variants_async(void *job, void *gdata, int thread_index)
{
   struct etna_shader *shader = job;
   struct util_debug_callback debug = {};
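   /* static, so zero-initialized: this compiles the variant for the default
    * (all-zero) shader key */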
   static struct etna_shader_key key;

   etna_shader_variant(shader, key, &debug);
}

static void *
etna_create_shader_state(struct pipe_context *pctx,
                         const struct pipe_shader_state *pss)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct etna_compiler *compiler = screen->compiler;
   struct etna_shader *shader = CALLOC_STRUCT(etna_shader);

   if (!shader)
      return NULL;

   shader->id = p_atomic_inc_return(&compiler->shader_count);
   shader->specs = &screen->specs;
   shader->compiler = screen->compiler;
   util_queue_fence_init(&shader->ready);

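   /* Gallium may hand us either NIR or TGSI; convert TGSI to NIR here, as the
    * backend compiler consumes NIR. */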
   shader->nir = (pss->type == PIPE_SHADER_IR_NIR) ? pss->ir.nir :
                  tgsi_to_nir(pss->tokens, pctx->screen, false);

   etna_disk_cache_init_shader_key(compiler, shader);

   if (initial_variants_synchronous(ctx)) {
      struct etna_shader_key key = {};
      etna_shader_variant(shader, key, &ctx->debug);
   } else {
      struct etna_screen *screen = ctx->screen;
      util_queue_add_job(&screen->shader_compiler_queue, shader, &shader->ready,
                         create_initial_variants_async, NULL, 0);
   }

   return shader;
}

static void
etna_delete_shader_state(struct pipe_context *pctx, void *ss)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct etna_shader *shader = ss;
   struct etna_shader_variant *v, *t;

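   /* Drop a still-pending initial-variant compile job (or wait for a running
    * one to finish) before tearing the shader down. */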
   util_queue_drop_job(&screen->shader_compiler_queue, &shader->ready);

   v = shader->variants;
   while (v) {
      t = v;
      v = v->next;
      if (t->bo)
         etna_bo_del(t->bo);

      etna_destroy_shader(t);
   }

   tgsi_free_tokens(shader->tokens);
   ralloc_free(shader->nir);
   util_queue_fence_destroy(&shader->ready);
   FREE(shader);
}

static void
etna_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
   struct etna_context *ctx = etna_context(pctx);

   ctx->shader.bind_fs = hwcso;
   ctx->dirty |= ETNA_DIRTY_SHADER;
}

static void
etna_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
   struct etna_context *ctx = etna_context(pctx);

   ctx->shader.bind_vs = hwcso;
   ctx->dirty |= ETNA_DIRTY_SHADER;
}

static void
etna_set_max_shader_compiler_threads(struct pipe_screen *pscreen,
                                     unsigned max_threads)
{
   struct etna_screen *screen = etna_screen(pscreen);

   util_queue_adjust_num_threads(&screen->shader_compiler_queue, max_threads);
}

static bool
etna_is_parallel_shader_compilation_finished(struct pipe_screen *pscreen,
                                             void *hwcso,
                                             enum pipe_shader_type shader_type)
{
   struct etna_shader *shader = (struct etna_shader *)hwcso;

   return util_queue_fence_is_signalled(&shader->ready);
}

void
etna_shader_init(struct pipe_context *pctx)
{
   pctx->create_fs_state = etna_create_shader_state;
   pctx->bind_fs_state = etna_bind_fs_state;
   pctx->delete_fs_state = etna_delete_shader_state;
   pctx->create_vs_state = etna_create_shader_state;
   pctx->bind_vs_state = etna_bind_vs_state;
   pctx->delete_vs_state = etna_delete_shader_state;
}

bool
etna_shader_screen_init(struct pipe_screen *pscreen)
{
   struct etna_screen *screen = etna_screen(pscreen);
   unsigned num_threads = util_get_cpu_caps()->nr_cpus - 1;

   /* Create at least one thread - even on single core CPU systems. */
   num_threads = MAX2(1, num_threads);

   screen->compiler = etna_compiler_create(pscreen->get_name(pscreen), &screen->specs);
   if (!screen->compiler)
      return false;

   pscreen->set_max_shader_compiler_threads = etna_set_max_shader_compiler_threads;
   pscreen->is_parallel_shader_compilation_finished = etna_is_parallel_shader_compilation_finished;

   return util_queue_init(&screen->shader_compiler_queue, "sh", 64, num_threads,
                          UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY,
                          NULL);
}

void
etna_shader_screen_fini(struct pipe_screen *pscreen)
{
   struct etna_screen *screen = etna_screen(pscreen);

   util_queue_destroy(&screen->shader_compiler_queue);
   etna_compiler_destroy(screen->compiler);
}