/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_compiler.h"
#include "etnaviv_compiler_nir.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_nir.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"
#include "nir.h"

#include <math.h>
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"

#include "util/compiler.h"
#include "util/half_float.h"
static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}
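
/* For example, with this filter nir_lower_alu_to_scalar() splits a vec4
 * nir_op_frcp into four scalar frcp instructions (these ops presumably only
 * exist in scalar form on this hardware), while a vec4 nir_op_fadd stays
 * vectorized.
 */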

static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      assert(sf->num_reg < ETNA_NUM_INPUTS);
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

#define OPT(nir, pass, ...) ({                             \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   this_progress;                                          \
})

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_shrink_stores, true);
      progress |= OPT(s, nir_opt_shrink_vectors);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_loop)) {
         progress = true;
         /* If nir_opt_loop makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll);
      progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->count = count * 4;
   uinfo->data = MALLOC(uinfo->count * sizeof(*uinfo->data));
   uinfo->contents = MALLOC(uinfo->count * sizeof(*uinfo->contents));

   for (unsigned i = 0; i < uinfo->count; i++) {
      uinfo->data[i] = consts[i];
      uinfo->contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}
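
/* Example: with count == 2 vec4 constant slots, uinfo->count becomes 8
 * scalar entries; each 64-bit const splits into data[i] (low 32 bits, the
 * value) and contents[i] (high 32 bits, the ETNA_UNIFORM_* type tag).
 */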

#define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
#define SRC_DISABLE ((hw_src){})
#define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_UNIFORM_0, .reg=idx, .swiz=s})
#define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_TEMP, .reg=idx, .swiz=s})

typedef struct etna_inst_dst hw_dst;
typedef struct etna_inst_src hw_src;

static inline hw_src
src_swizzle(hw_src src, unsigned swizzle)
{
   if (src.rgroup != INST_RGROUP_IMMEDIATE)
      src.swiz = inst_swiz_compose(src.swiz, swizzle);

   return src;
}

/* constants are represented as 64-bit ints:
 * 32 bits for the value and 32 bits for the type (imm, uniform, etc.)
 */

#define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
#define CONST(x) CONST_VAL(ETNA_UNIFORM_CONSTANT, x)
#define UNIFORM(x) CONST_VAL(ETNA_UNIFORM_UNIFORM, x)
#define TEXSCALE(x, i) CONST_VAL(ETNA_UNIFORM_TEXRECT_SCALE_X + (i), x)
#define TEXSIZE(x, i) CONST_VAL(ETNA_UNIFORM_TEXTURE_WIDTH + (i), x)
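
/* Example: UNIFORM(7) carries ETNA_UNIFORM_UNIFORM in the high 32 bits and
 * scalar slot 7 in the low 32 bits; CONST(0x3f800000) tags the bit pattern
 * of 1.0f as ETNA_UNIFORM_CONSTANT.
 */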

static int
const_add(uint64_t *c, uint64_t value)
{
   for (unsigned i = 0; i < 4; i++) {
      if (c[i] == value || !c[i]) {
         c[i] = value;
         return i;
      }
   }
   return -1;
}
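
/* Worked example: starting from an empty vec4 c = {0}, const_add(c, A)
 * returns component 0 and const_add(c, B) returns 1, while a repeated
 * const_add(c, A) returns 0 again, deduplicating the value. Once all four
 * components hold distinct values, any new value yields -1.
 */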

static hw_src
const_src(struct etna_compile *c, nir_const_value *value, unsigned num_components)
{
   /* use inline immediates if possible */
   if (c->specs->halti >= 2 && num_components == 1 &&
       value[0].u64 >> 32 == ETNA_UNIFORM_CONSTANT) {
      uint32_t bits = value[0].u32;

      /* "float" - shifted by 12 */
      if ((bits & 0xfff) == 0)
         return etna_immediate_src(0, bits >> 12);

      /* "unsigned" - raw 20-bit value */
      if (bits < (1 << 20))
         return etna_immediate_src(2, bits);

      /* "signed" - sign-extended 20-bit (sign included) value */
      if (bits >= 0xfff80000)
         return etna_immediate_src(1, bits);
   }
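
   /* Worked example of the immediate forms above: 1.0f has the bit pattern
    * 0x3f800000, whose low 12 bits are zero, so it encodes as a "float"
    * immediate with payload 0x3f800. The value 100 fits in 20 bits and
    * becomes an "unsigned" immediate, while 0xffffffff (-1) is above
    * 0xfff80000 and becomes a sign-extended "signed" immediate.
    */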

   unsigned i;
   int swiz = -1;
   for (i = 0; swiz < 0; i++) {
      uint64_t *a = &c->consts[i*4];
      uint64_t save[4];
      memcpy(save, a, sizeof(save));
      swiz = 0;
      for (unsigned j = 0; j < num_components; j++) {
         int c = const_add(a, value[j].u64);
         if (c < 0) {
            memcpy(a, save, sizeof(save));
            swiz = -1;
            break;
         }
         swiz |= c << j * 2;
      }
   }

   assert(i <= ETNA_MAX_IMM / 4);
   c->const_count = MAX2(c->const_count, i);

   return SRC_CONST(i - 1, swiz);
}

/* how to swizzle when used as a src */
static const uint8_t
reg_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(Y, Z, W, X),
};

/* how to swizzle when used as a dest */
static const uint8_t
reg_dst_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(X, X, Y, Z),
};
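
/* Example: a 2-component value packed into the .zw channels of its base
 * register has type REG_TYPE_VIRT_VEC2_ZW. Read as a source, it uses
 * SWIZZLE(Z, W, Z, W) to move the data into the .xy lanes an instruction
 * expects; written as a destination, SWIZZLE(X, X, X, Y) together with the
 * corresponding reg_writemask entry (defined elsewhere) routes the
 * instruction's natural .xy result into the .z and .w channels.
 */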

/* nir_src to allocated register */
static hw_src
ra_src(struct etna_compile *c, nir_src *src)
{
   unsigned reg = ra_get_node_reg(c->g, c->live_map[src_index(c->impl, src)]);
   return SRC_REG(reg_get_base(c, reg), reg_swiz[reg_get_type(reg)]);
}

static hw_src
get_src(struct etna_compile *c, nir_src *src)
{
   nir_instr *instr = src->ssa->parent_instr;

   if (instr->pass_flags & BYPASS_SRC) {
      assert(instr->type == nir_instr_type_alu);
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      assert(alu->op == nir_op_mov);
      return src_swizzle(get_src(c, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
   }

   switch (instr->type) {
   case nir_instr_type_load_const:
      return const_src(c, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      switch (intr->intrinsic) {
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_reg:
         return ra_src(c, src);
      case nir_intrinsic_load_front_face:
         return (hw_src) { .use = 1, .rgroup = INST_RGROUP_INTERNAL };
      case nir_intrinsic_load_frag_coord:
         return SRC_REG(0, INST_SWIZ_IDENTITY);
      case nir_intrinsic_load_texture_scale: {
         int sampler = nir_src_as_int(intr->src[0]);
         nir_const_value values[] = {
            TEXSCALE(sampler, 0),
            TEXSCALE(sampler, 1),
         };

         return src_swizzle(const_src(c, values, 2), SWIZZLE(X,Y,X,X));
      }
      case nir_intrinsic_load_texture_size_etna: {
         int sampler = nir_src_as_int(intr->src[0]);
         nir_const_value values[] = {
            TEXSIZE(sampler, 0),
            TEXSIZE(sampler, 1),
            TEXSIZE(sampler, 2),
         };

         return src_swizzle(const_src(c, values, 3), SWIZZLE(X,Y,Z,X));
      }
      default:
         compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                       nir_intrinsic_infos[intr->intrinsic].name);
         break;
      }
   } break;
   case nir_instr_type_alu:
   case nir_instr_type_tex:
      return ra_src(c, src);
   case nir_instr_type_undef: {
      /* return zero to deal with broken Blur demo */
      nir_const_value value = CONST(0);
      return src_swizzle(const_src(c, &value, 1), SWIZZLE(X,X,X,X));
   }
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }

   return SRC_DISABLE;
}

static bool
vec_dest_has_swizzle(nir_alu_instr *vec, nir_def *ssa)
{
   for (unsigned i = 0; i < vec->def.num_components; i++) {
      if (vec->src[i].src.ssa != ssa)
         continue;

      if (vec->src[i].swizzle[0] != i)
         return true;
   }

   /* don't deal with possible bypassed vec/mov chain */
   nir_foreach_use(use_src, ssa) {
      nir_instr *instr = nir_src_parent_instr(use_src);
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_mov:
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         return true;
      default:
         break;
      }
   }
   return false;
}

/* get allocated dest register for nir_def;
 * *p_swiz tells how the components need to be placed into the register
 */
static hw_dst
ra_def(struct etna_compile *c, nir_def *def, unsigned *p_swiz)
{
   unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
   def = real_def(def, &swiz, &mask);

   unsigned r = ra_get_node_reg(c->g, c->live_map[def_index(c->impl, def)]);
   unsigned t = reg_get_type(r);

   *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);

   return (hw_dst) {
      .use = 1,
      .reg = reg_get_base(c, r),
      .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
   };
}

static void
emit_alu(struct etna_compile *c, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* marked as dead instruction (vecN and other bypassed instr) */
   if (is_dead_instruction(&alu->instr))
      return;

   assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));

   unsigned dst_swiz;
   hw_dst dst = ra_def(c, &alu->def, &dst_swiz);

   switch (alu->op) {
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
      /* not per-component - don't compose dst_swiz */
      dst_swiz = INST_SWIZ_IDENTITY;
      break;
   default:
      break;
   }

   hw_src srcs[3];

   for (int i = 0; i < info->num_inputs; i++) {
      nir_alu_src *asrc = &alu->src[i];
      hw_src src;

      src = src_swizzle(get_src(c, &asrc->src), ALU_SWIZ(asrc));
      src = src_swizzle(src, dst_swiz);

      if (src.rgroup != INST_RGROUP_IMMEDIATE) {
         src.neg = is_src_mod_neg(&alu->instr, i) || (alu->op == nir_op_fneg);
         src.abs = is_src_mod_abs(&alu->instr, i) || (alu->op == nir_op_fabs);
      } else {
         assert(!is_src_mod_abs(&alu->instr, i) && alu->op != nir_op_fabs);

         if (src.imm_type > 0)
            assert(!is_src_mod_neg(&alu->instr, i));

         if (is_src_mod_neg(&alu->instr, i) && src.imm_type == 0)
            src.imm_val ^= 0x80000;
      }

      srcs[i] = src;
   }

   etna_emit_alu(c, alu->op, dst, srcs, alu->op == nir_op_fsat);
}

static void
emit_tex(struct etna_compile *c, nir_tex_instr *tex)
{
   unsigned dst_swiz;
   hw_dst dst = ra_def(c, &tex->def, &dst_swiz);
   nir_src *coord = NULL, *src1 = NULL, *src2 = NULL;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
         coord = &tex->src[i].src;
         break;
      case nir_tex_src_bias:
      case nir_tex_src_lod:
      case nir_tex_src_ddx:
         assert(!src1);
         src1 = &tex->src[i].src;
         break;
      case nir_tex_src_comparator:
      case nir_tex_src_ddy:
         src2 = &tex->src[i].src;
         break;
      default:
         compile_error(c, "Unhandled NIR tex src type: %d\n",
                       tex->src[i].src_type);
         break;
      }
   }

   etna_emit_tex(c, tex->op, tex->sampler_index, dst_swiz, dst, get_src(c, coord),
                 src1 ? get_src(c, src1) : SRC_DISABLE,
                 src2 ? get_src(c, src2) : SRC_DISABLE);
}

static void
emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr *intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_store_deref:
      etna_emit_output(c, nir_src_as_deref(intr->src[0])->var, get_src(c, &intr->src[1]));
      break;
   case nir_intrinsic_discard_if:
      etna_emit_discard(c, get_src(c, &intr->src[0]));
      break;
   case nir_intrinsic_discard:
      etna_emit_discard(c, SRC_DISABLE);
      break;
   case nir_intrinsic_load_uniform: {
      unsigned dst_swiz;
      struct etna_inst_dst dst = ra_def(c, &intr->def, &dst_swiz);

      /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOVAR,
         .dst.write_mask = 0x1,
         .src[2] = get_src(c, &intr->src[0]),
      });
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOV,
         .dst = dst,
         .src[2] = {
            .use = 1,
            .rgroup = INST_RGROUP_UNIFORM_0,
            .reg = nir_intrinsic_base(intr),
            .swiz = dst_swiz,
            .amode = INST_AMODE_ADD_A_X,
         },
      });
   } break;
   case nir_intrinsic_load_ubo: {
      /* TODO: if offset is of the form (x + C) then add C to the base instead */
      unsigned idx = nir_src_as_const_value(intr->src[0])[0].u32;
      unsigned dst_swiz;
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_LOAD,
         .type = INST_TYPE_U32,
         .dst = ra_def(c, &intr->def, &dst_swiz),
         .src[0] = get_src(c, &intr->src[1]),
         .src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO0_ADDR + idx, 0), 1),
      });
   } break;
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_frag_coord:
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_texture_scale:
   case nir_intrinsic_load_texture_size_etna:
   case nir_intrinsic_decl_reg:
   case nir_intrinsic_load_reg:
   case nir_intrinsic_store_reg:
      break;
   default:
      compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                    nir_intrinsic_infos[intr->intrinsic].name);
   }
}

static void
emit_instr(struct etna_compile *c, nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      emit_alu(c, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_tex:
      emit_tex(c, nir_instr_as_tex(instr));
      break;
   case nir_instr_type_intrinsic:
      emit_intrinsic(c, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_jump:
      assert(nir_instr_is_last(instr));
      break;
   case nir_instr_type_load_const:
   case nir_instr_type_undef:
   case nir_instr_type_deref:
      break;
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }
}

static void
emit_block(struct etna_compile *c, nir_block *block)
{
   etna_emit_block_start(c, block->index);

   nir_foreach_instr(instr, block)
      emit_instr(c, instr);

   /* succs->index < block->index is for the loop case */
   nir_block *succs = block->successors[0];
   if (nir_block_ends_in_jump(block) || succs->index < block->index)
      etna_emit_jump(c, succs->index, SRC_DISABLE);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list);

static void
emit_if(struct etna_compile *c, nir_if *nif)
{
   etna_emit_jump(c, nir_if_first_else_block(nif)->index, get_src(c, &nif->condition));
   emit_cf_list(c, &nif->then_list);

   /* jump at end of then_list to skip else_list,
    * not needed if then_list already ends with a jump or else_list is empty
    */
   if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
       !nir_cf_list_is_empty_block(&nif->else_list))
      etna_emit_jump(c, nir_if_last_then_block(nif)->successors[0]->index, SRC_DISABLE);

   emit_cf_list(c, &nif->else_list);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_block:
         emit_block(c, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         emit_if(c, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         assert(!nir_loop_has_continue_construct(nir_cf_node_as_loop(node)));
         emit_cf_list(c, &nir_cf_node_as_loop(node)->body);
         break;
      default:
         compile_error(c, "Unknown NIR node type\n");
         break;
      }
   }
}

/* based on nir_lower_vec_to_movs */
static unsigned
insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);
   unsigned write_mask = (1u << start_idx);

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx]);

   mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];

   if (is_src_mod_neg(&vec->instr, start_idx))
      set_src_mod_neg(&mov->instr, 0);

   if (is_src_mod_abs(&vec->instr, start_idx))
      set_src_mod_abs(&mov->instr, 0);

   unsigned num_components = 1;

   for (unsigned i = start_idx + 1; i < vec->def.num_components; i++) {
      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          is_src_mod_neg(&vec->instr, i) == is_src_mod_neg(&vec->instr, start_idx) &&
          is_src_mod_abs(&vec->instr, i) == is_src_mod_abs(&vec->instr, start_idx)) {
         write_mask |= (1 << i);
         mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
         num_components++;
      }
   }

   nir_def_init(&mov->instr, &mov->def, num_components, 32);

   /* replace vec srcs with inserted mov */
   for (unsigned i = 0, j = 0; i < 4; i++) {
      if (!(write_mask & (1 << i)))
         continue;

      nir_src_rewrite(&vec->src[i].src, &mov->def);
      vec->src[i].swizzle[0] = j++;
   }

   nir_instr_insert_before(&vec->instr, &mov->instr);

   return write_mask;
}
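
/* Worked example: for vec4 dst, { a.x, b.x, a.y, c.x },
 * insert_vec_mov(vec, 0, shader) emits "mov tmp.xy, a.xy" and returns
 * write_mask 0b0101: components 0 and 2 share source 'a' with matching
 * modifiers, and both are rewritten to read from the new mov, so later
 * calls only need to cover the remaining components 1 and 3.
 */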

/*
 * Get the nir_const_value from an alu src. Also look at
 * the parent instruction, as it could be a fabs/fneg.
 */
static nir_const_value *
get_alu_cv(nir_alu_src *src)
{
   nir_const_value *cv = nir_src_as_const_value(src->src);

   if (!cv &&
       (src->src.ssa->parent_instr->type == nir_instr_type_alu)) {
      nir_alu_instr *parent = nir_instr_as_alu(src->src.ssa->parent_instr);

      if ((parent->op == nir_op_fabs) ||
          (parent->op == nir_op_fneg)) {
         cv = nir_src_as_const_value(parent->src[0].src);

         if (cv) {
            /* Validate that we are only using ETNA_UNIFORM_CONSTANT const_values. */
            for (unsigned i = 0; i < parent->def.num_components; i++) {
               if (cv[i].u64 >> 32 != ETNA_UNIFORM_CONSTANT) {
                  cv = NULL;
                  break;
               }
            }
         }
      }
   }

   return cv;
}

/*
 * for vecN instructions:
 * - merge constant sources into a single src
 * - insert movs (nir_lower_vec_to_movs equivalent)
 * for non-vecN instructions:
 * - try to merge constants as single constant
 * - insert movs for multiple constants if required
 */
static void
lower_alu(struct etna_compile *c, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   nir_builder b = nir_builder_at(nir_before_instr(&alu->instr));

   switch (alu->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      break;
   default:
      if (c->specs->has_no_oneconst_limit)
         return;

      nir_const_value value[4] = {};
      uint8_t swizzle[4][4] = {};
      unsigned swiz_max = 0, num_different_const_srcs = 0;
      int first_const = -1;

      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         unsigned num_components = info->input_sizes[i] ?: alu->def.num_components;
         for (unsigned j = 0; j < num_components; j++) {
            int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
            swizzle[i][j] = idx;
            swiz_max = MAX2(swiz_max, (unsigned) idx);
         }

         if (first_const == -1)
            first_const = i;

         if (!nir_srcs_equal(alu->src[first_const].src, alu->src[i].src))
            num_different_const_srcs++;
      }

      /* nothing to do */
      if (num_different_const_srcs == 0)
         return;

      /* resolve with single combined const src */
      if (swiz_max < 4) {
         nir_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);

         for (unsigned i = 0; i < info->num_inputs; i++) {
            nir_const_value *cv = get_alu_cv(&alu->src[i]);
            if (!cv)
               continue;

            nir_src_rewrite(&alu->src[i].src, def);

            for (unsigned j = 0; j < 4; j++)
               alu->src[i].swizzle[j] = swizzle[i][j];
         }
         return;
      }

      /* resolve with movs */
      unsigned num_const = 0;
      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         num_const++;
         if (num_const == 1)
            continue;

         nir_def *mov = nir_mov(&b, alu->src[i].src.ssa);
         nir_src_rewrite(&alu->src[i].src, mov);
      }
      return;
   }

   nir_const_value value[4];
   unsigned num_components = 0;

   for (unsigned i = 0; i < info->num_inputs; i++) {
      nir_const_value *cv = get_alu_cv(&alu->src[i]);
      if (cv)
         value[num_components++] = cv[alu->src[i].swizzle[0]];
   }

   /* if there is more than one constant source to the vecN, combine them
    * into a single load_const (removing the vecN completely if all components
    * are constant)
    */
   if (num_components > 1) {
      nir_def *def = nir_build_imm(&b, num_components, 32, value);

      if (num_components == info->num_inputs) {
         nir_def_rewrite_uses(&alu->def, def);
         nir_instr_remove(&alu->instr);
         return;
      }

      for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         nir_src_rewrite(&alu->src[i].src, def);
         alu->src[i].swizzle[0] = j++;
      }
   }

   unsigned finished_write_mask = 0;
   for (unsigned i = 0; i < alu->def.num_components; i++) {
      nir_def *ssa = alu->src[i].src.ssa;

      /* check that vecN instruction is only user of this */
      bool need_mov = false;
      nir_foreach_use_including_if(use_src, ssa) {
         if (nir_src_is_if(use_src) || nir_src_parent_instr(use_src) != &alu->instr)
            need_mov = true;
      }

      nir_instr *instr = ssa->parent_instr;
      switch (instr->type) {
      case nir_instr_type_alu:
      case nir_instr_type_tex:
         break;
      case nir_instr_type_intrinsic:
         if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
            need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->def);
            break;
         }
         FALLTHROUGH;
      default:
         need_mov = true;
      }

      if (need_mov && !(finished_write_mask & (1 << i)))
         finished_write_mask |= insert_vec_mov(alu, i, c->nir);
   }
}
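
/* Example of the vecN constant handling above: vec4(s, 1.0, 2.0, t) first
 * becomes vec4(s, c.x, c.y, t) with a single combined load_const
 * c = (1.0, 2.0); had all four components been constant, the vecN would be
 * removed entirely in favor of the load_const. The final loop then inserts
 * movs (via insert_vec_mov) for components whose producers cannot write
 * their channels in place.
 */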

static bool
emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
{
   nir_shader *shader = c->nir;
   c->impl = nir_shader_get_entrypoint(shader);

   bool have_indirect_uniform = false;
   unsigned indirect_max = 0;

   nir_builder b = nir_builder_create(c->impl);

   /* convert non-dynamic uniform loads to constants, etc */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_alu:
            /* deals with vecN and const srcs */
            lower_alu(c, nir_instr_as_alu(instr));
            break;
         case nir_instr_type_load_const: {
            nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
            for (unsigned i = 0; i < load_const->def.num_components; i++)
               load_const->value[i] = CONST(load_const->value[i].u32);
         } break;
         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            /* TODO: load_ubo can also become a constant in some cases
             * (at the moment it can end up emitting a LOAD with two
             *  uniform sources, which could be a problem on HALTI2)
             */
            if (intr->intrinsic != nir_intrinsic_load_uniform)
               break;
            nir_const_value *off = nir_src_as_const_value(intr->src[0]);
            if (!off || off[0].u64 >> 32 != ETNA_UNIFORM_CONSTANT) {
               have_indirect_uniform = true;
               indirect_max = nir_intrinsic_base(intr) + nir_intrinsic_range(intr);
               break;
            }

            unsigned base = nir_intrinsic_base(intr);
            /* pre-halti2 uniform offset will be float */
            if (c->specs->halti < 2)
               base += (unsigned) off[0].f32;
            else
               base += off[0].u32;
            nir_const_value value[4];

            for (unsigned i = 0; i < intr->def.num_components; i++)
               value[i] = UNIFORM(base * 4 + i);

            b.cursor = nir_after_instr(instr);
            nir_def *def = nir_build_imm(&b, intr->def.num_components, 32, value);

            nir_def_rewrite_uses(&intr->def, def);
            nir_instr_remove(instr);
         } break;
         default:
            break;
         }
      }
   }

   /* TODO: only emit required indirect uniform ranges */
   if (have_indirect_uniform) {
      for (unsigned i = 0; i < indirect_max * 4; i++)
         c->consts[i] = UNIFORM(i).u64;
      c->const_count = indirect_max;
   }

   /* add mov for any store output using sysval/const and for depth stores from intrinsics */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            nir_src *src = &intr->src[1];
            if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr) ||
                (shader->info.stage == MESA_SHADER_FRAGMENT &&
                 deref->var->data.location == FRAG_RESULT_DEPTH &&
                 src->ssa->parent_instr->type != nir_instr_type_alu)) {
               b.cursor = nir_before_instr(instr);
               nir_src_rewrite(src, nir_mov(&b, src->ssa));
            }
         } break;
         default:
            break;
         }
      }
   }

   /* call directly to avoid validation (load_const doesn't pass validation at this point) */
   nir_convert_from_ssa(shader, true);
   nir_trivialize_registers(shader);

   etna_ra_assign(c, shader);

   emit_cf_list(c, &nir_shader_get_entrypoint(shader)->body);

   *num_temps = etna_ra_finish(c);
   *num_consts = c->const_count;
   return true;
}

static bool
etna_compile_check_limits(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;
   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
                         ? specs->max_vs_uniforms
                         : specs->max_ps_uniforms;

   if (!specs->has_icache && v->needs_icache) {
      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
          specs->max_instructions);
      return false;
   }

   if (v->num_temps > specs->max_registers) {
      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
          specs->max_registers);
      return false;
   }

   if (v->uniforms.count / 4 > max_uniforms) {
      DBG("Number of uniforms (%d) exceeds maximum %d",
          v->uniforms.count / 4, max_uniforms);
      return false;
   }

   return true;
}

static void
fill_vs_mystery(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as chip-specific vertex output buffer size, vertex cache size, and the
    * number of shader cores.
    *
    * XXX this is a conservative estimate; the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: the TGSI compiler counts all outputs (including position and
    * pointsize); here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */

   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                           2 * half_out * specs->vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);
}
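
/* Worked example with hypothetical specs values: vertex_output_buffer_size
 * = 512, vertex_cache_size = 16, shader_core_count = 4 and one varying
 * (half_out = 1) give b = (20480 / (512 - 2 * 1 * 16) + 9) / 10
 * = (42 + 9) / 10 = 5 and a = (5 + 256 / (4 * 1)) / 2 = 34, both well
 * under the 255 clamp.
 */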

bool
etna_compile_shader(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->uses_discard = s->info.fs.uses_discard;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /*
    * Lower glTexCoord, fixes e.g. neverball point sprite (exit cylinder stars)
    * and gl4es pointsprite.trace apitrace
    */
   if (s->info.stage == MESA_SHADER_FRAGMENT && v->key.sprite_coord_enable) {
      NIR_PASS_V(s, nir_lower_texcoord_replace, v->key.sprite_coord_enable,
                 false, v->key.sprite_coord_yinvert);
   }

   /*
    * Remove any dead in variables before we iterate over them
    */
   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_shader_in, NULL);

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_uniform, etna_glsl_type_size,
              (nir_lower_io_options)0);

   NIR_PASS_V(s, nir_lower_vars_to_ssa);
   NIR_PASS_V(s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
   NIR_PASS_V(s, etna_nir_lower_texture, &v->key);

   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
   if (c->specs->halti >= 2) {
      nir_lower_idiv_options idiv_options = {
         .allow_fp16 = true,
      };
      NIR_PASS_V(s, nir_lower_idiv, &idiv_options);
   }
   NIR_PASS_V(s, nir_lower_alu);

   etna_optimize_loop(s);

   /* TODO: remove this extra run if nir_opt_peephole_select is able to handle ubo's. */
   if (OPT(s, etna_nir_lower_ubo_to_uniform))
      etna_optimize_loop(s);

   NIR_PASS_V(s, etna_lower_io, v);
   NIR_PASS_V(s, nir_lower_pack);
   etna_optimize_loop(s);

   if (v->shader->specs->vs_need_z_div)
      NIR_PASS_V(s, nir_lower_clip_halfz);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      NIR_PASS_V(s, nir_lower_int_to_float);
      NIR_PASS_V(s, nir_opt_algebraic);
      NIR_PASS_V(s, nir_lower_bool_to_float, true);
   } else {
      NIR_PASS_V(s, nir_lower_bool_to_int32);
   }

   while (OPT(s, nir_opt_vectorize, NULL, NULL));
   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest, false);
   NIR_PASS_V(s, nir_copy_prop);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);
   NIR_PASS_V(s, nir_opt_cse);

   NIR_PASS_V(s, nir_lower_bool_to_bitsize);
   NIR_PASS_V(s, etna_lower_alu, c->specs->has_new_transcendentals);

   /* needs to be the last pass that touches pass_flags! */
   NIR_PASS_V(s, etna_nir_lower_to_source_mods);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->no_oneconst_limit = specs->has_no_oneconst_limit;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      fill_vs_mystery(v);
   }

   bool result = etna_compile_check_limits(v);
   ralloc_free(c->nir);
   FREE(c);
   return result;
}

static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   /*
    * There are valid NIR shader pairs where the vertex shader has
    * a VARYING_SLOT_BFC0 shader_out and the corresponding fragment
    * shader has a VARYING_SLOT_COL0 shader_in.
    * So at link time, if there is no VS output matching the FS input
    * VARYING_SLOT_COL[n], we must map it to VARYING_SLOT_BFC[n].
    */
   gl_varying_slot slot;

   if (in->slot == VARYING_SLOT_COL0)
      slot = VARYING_SLOT_BFC0;
   else if (in->slot == VARYING_SLOT_COL1)
      slot = VARYING_SLOT_BFC1;
   else
      return NULL;

   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

void
etna_link_shader(struct etna_shader_link_info *info,
                 const struct etna_shader_variant *vs,
                 const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point/tex coord is an input to the PS without matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else if (util_varying_is_point_coord(fsio->slot, fs->key.sprite_coord_enable)) {
         /*
          * Do nothing, TexCoord is lowered to PointCoord above
          * and the TexCoord here is just a remnant. This needs
          * to be removed with some nir_remove_dead_variables(),
          * but that one removes all FS inputs ... why?
          */
      } else {
         /* pick a random register to use if there is no VS output */
         if (vsio == NULL)
            varying->reg = 0;
         else
            varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);
}
1370