/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

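/* Grows *regs to at least decl_size entries, doubling the array to amortize
 * reallocation and initializing the new entries to c->undef.
 */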
static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

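/* Emits a QPU thread switch signal, tracked in c->last_thrsw.
 * Single-threaded programs never switch.
 */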
void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}

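/* Emits a special-function unit operation (RECIP, RSQRT, EXP, LOG) by
 * moving the source to the unit's magic write address; the result is then
 * read back from R4 into a temporary.
 */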
static struct qreg
vir_SFU(struct v3d_compile *c, int waddr, struct qreg src)
{
        vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, waddr), src);
        return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
}

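/* Loads a uniform at a non-constant offset by finding the declared UBO
 * range containing the base offset and issuing a TMU general-memory read
 * from the corresponding location in the UBO copy of the uniforms.
 */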
static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Write the UBO base address plus the indirect offset to the TMUA
         * register to start the TMU general-memory lookup.
         */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        vir_emit_thrsw(c);
        return vir_LDTMU(c);
}

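/* Allocates the array of qregs (one per component) that will hold the VIR
 * values of a NIR SSA def, and registers it in the def hash table.
 */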
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        /* Set the flags to the current exec mask.  To insert
                         * the flags push, we temporarily remove our SSA
                         * instruction.
                         */
                        list_del(&last_inst->link);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        list_addtail(&last_inst->link,
                                     &c->cur_block->instructions);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}

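/* Returns the VIR value holding component i of a NIR source, whether it
 * comes from an SSA def or a NIR register.
 */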
struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

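/* Returns the VIR value for one source of a scalarized ALU instruction,
 * resolving the swizzle for the single channel being written.  abs/negate
 * modifiers are expected to have been lowered in NIR.
 */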
static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static inline struct qreg
vir_SAT(struct v3d_compile *c, struct qreg val)
{
        return vir_FMAX(c,
                        vir_FMIN(c, val, vir_uniform_f(c, 1.0)),
                        vir_uniform_f(c, 0.0));
}

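/* Emits a full 32-bit integer multiply on the QPU's 24-bit multiplier;
 * MULTOP pairs with the following UMUL24 to produce the 32-bit result.
 */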
static struct qreg
ntq_umul(struct v3d_compile *c, struct qreg src0, struct qreg src1)
{
        vir_MULTOP(c, src0, src1);
        return vir_UMUL24(c, src0, src1);
}

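/* Computes max(size >> level, 1), the size of a mip level. */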
static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}

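/* Emits a textureSize() (txs) query: each dimension is loaded as a uniform
 * and minified by the requested LOD, except for array sizes and rect
 * textures, which have no mip levels.
 */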
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                assert(i < 3);
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size
         * queries; we just upload uniforms containing the size instead.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}

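/* Computes sin(x) (or cos(x), by adding a quarter period) using the SFU SIN
 * unit, which takes x/pi in [-0.5, 0.5]: the input is scaled by 1/pi and
 * range-reduced by subtracting the rounded period count, and the XOR then
 * moves the period count's low bit into the sign bit to negate the result
 * for odd periods.
 */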
static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SFU(c, V3D_QPU_WADDR_SIN,
                                         vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}

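/* Implements sign(x) with conditional moves: start at 0.0, overwrite with
 * 1.0 where the source is non-zero, then with -1.0 where it is negative.
 */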
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

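/* Integer variant of ntq_fsign(). */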
static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}

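/* Sets up the four gl_FragCoord channels: X/Y from the fragment coordinate
 * ALU ops, Z straight from the payload, and W as 1/payload_w via the SFU.
 */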
static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                          c->payload_w);
}

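/* Emits the ldvary sequence for one scalar fragment shader input.  The
 * ldvary result (r3 on pre-4.1 hardware) is combined with r5 according to
 * the variable's interpolation mode, multiplying by W for the
 * perspective-correct modes.
 */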
static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable; those don't count toward VPM size, so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                /* C appears after the mov from the varying.
                 * XXX: improve ldvary setup.
                 */
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}

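/* Emits one varying fetch per component of a fragment input variable. */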
static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}

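/* Records a new shader output at decl_offset, growing the outputs array as
 * needed and noting the varying slot/component it maps to.
 */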
static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

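/* Registers [start, start + size) as a uniform range that indirect loads
 * may reference, growing the range arrays as needed.
 */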
static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}

/**
 * If compare_instr is a valid comparison instruction, emits the comparison
 * and stores in *dest the value sel_instr should produce based on the
 * comparison's result.  Returns false for unhandled opcodes.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_PF(c, vir_FCMP(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        enum v3d_qpu_cond cond = (cond_invert ?
                                  V3D_QPU_COND_IFNA :
                                  V3D_QPU_COND_IFA);

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = vir_SEL(c, cond,
                                vir_uniform_f(c, 1.0), vir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = vir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = vir_SEL(c, cond,
                                vir_uniform_ui(c, ~0), vir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for ntq_store_dest(). */
        *dest = vir_MOV(c, *dest);

        return true;
}

/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}


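/* Translates one scalarized NIR ALU instruction (or vector move) into VIR. */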
static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                result = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_SFU(c, V3D_QPU_WADDR_RECIP, src[0]);
                break;
        case nir_op_frsq:
                result = vir_SFU(c, V3D_QPU_WADDR_RSQRT, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_SFU(c, V3D_QPU_WADDR_EXP, src[0]);
                break;
        case nir_op_flog2:
                result = vir_SFU(c, V3D_QPU_WADDR_LOG, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs: {
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;
        }

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}

/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR         (3 << 6)
#define TLB_TYPE_I32_COLOR         (1 << 6)
#define TLB_TYPE_F32_COLOR         (0 << 6)
#define TLB_RENDER_TARGET_SHIFT    3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL  (1 << 2)
#define TLB_F16_SWAP_HI_LO         (1 << 1)
#define TLB_VEC_SIZE_4_F16         (1 << 0)
#define TLB_VEC_SIZE_2_F16         (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT 0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH             ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT   (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL   (1 << 2) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA     ((2 << 6) | (1 << 4))

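/* Emits the fragment shader epilogue: a TLB depth write when the shader
 * writes gl_FragDepth (or needs a placeholder Z write), followed by the TLB
 * writes for each color render target.
 */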
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_PER_PIXEL |
                                       0xffffff00);
        } else if (c->s->info.fs.uses_discard || !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, also emit passthrough Z if we didn't have any color
                 * buffers, and flag the shader as potentially discarding so
                 * that we can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_INVARIANT |
                                       0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        conf |= TLB_TYPE_I32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                for (int i = 1; i < num_components; i++) {
                                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                            color[i]);
                                }
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}

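/* Writes one component to the VPM output: v4.x uses explicitly addressed
 * STVPMV writes, while v3.3 writes to the VPM magic register in the order
 * declared by the write setup.
 */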
static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
                *vpm_index = *vpm_index + 1;
        } else {
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }

        c->num_vpm_writes++;
}

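/* Emits the screen-space X/Y of the vertex: the clip-space coordinate
 * scaled by the viewport and multiplied by 1/W, converted to integer.
 */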
static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
                           uint32_t *vpm_index)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
        }
}

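/* Emits the viewport-transformed depth: z * zscale / w + zoffset. */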
static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        struct qreg z = c->outputs[c->output_position_index + 2];
        z = vir_FMUL(c, z, zscale);
        z = vir_FMUL(c, z, rcp_w);
        z = vir_FADD(c, z, zoffset);
        vir_VPM_WRITE(c, z, vpm_index);
}

static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        vir_VPM_WRITE(c, rcp_w, vpm_index);
}

static void
emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size, vpm_index);
}

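/* v3.3 requires a VPM write setup before the output writes; v4.x addresses
 * each write explicitly instead.
 */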
static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 40)
                return;

        v3d33_vir_vpm_write_setup(c);
}

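/* Emits the vertex shader epilogue: VPM writes for the position (raw
 * coordinates for coordinate shaders), point size, and every varying the
 * fragment shader reads, padding FS inputs with no matching VS output.
 */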
static void
emit_vert_end(struct v3d_compile *c)
{
        uint32_t vpm_index = 0;
        struct qreg rcp_w = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                    c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
                                      &vpm_index);
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c, &vpm_index);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w, &vpm_index);
        } else {
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                emit_zs_write(c, rcp_w, &vpm_index);
                emit_rcp_wc_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c, &vpm_index);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j],
                                              &vpm_index);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
                                      &vpm_index);
        }

        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 41)
                vir_VPMWT(c);
}

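/* Runs the NIR optimization loop until no pass makes further progress. */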
void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

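/* Reads the next input component from the VPM.  v4.x uses addressed LDVPMV
 * reads; v3.3 batches components into read setups of up to 32 and then
 * consumes them from the VPM register in order.
 */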
static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}

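/* Sets up the shader inputs: fragment varyings (plus point coordinates and
 * gl_FragCoord) for fragment shaders, and VPM reads in driver_location
 * order (with instance/vertex IDs first) for vertex shaders.
 */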
static void
ntq_setup_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        uint32_t vpm_components_queued = 0;
        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                bool uses_iid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_INSTANCE_ID);
                bool uses_vid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_VERTEX_ID);

                num_components += uses_iid;
                num_components += uses_vid;

                if (uses_iid) {
                        c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }

                if (uses_vid) {
                        c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }
        }

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_PNTC ||
                                   (var->data.location >= VARYING_SLOT_VAR0 &&
                                    (c->fs_key->point_sprite_mask &
                                     (1 << (var->data.location -
                                            VARYING_SLOT_VAR0))))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var);
                        }
                } else {
                        int var_components = glsl_get_components(var->type);

                        for (int i = 0; i < var_components; i++) {
                                c->inputs[loc * 4 + i] =
                                        ntq_emit_vpm_read(c,
                                                          &vpm_components_queued,
                                                          &num_components,
                                                          loc * 4 + i);
                        }
                        c->vattr_sizes[loc] = var_components;
                }
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                if (c->devinfo->ver >= 40) {
                        assert(vpm_components_queued == num_components);
                } else {
                        assert(vpm_components_queued == 0);
                        assert(num_components == 0);
                }
        }
}

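/* Walks the shader's output variables, assigning consecutive output slots
 * and recording the special position/point size/color/sample mask outputs.
 */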
static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

1372 static void
1373 ntq_setup_uniforms(struct v3d_compile *c)
1374 {
1375         nir_foreach_variable(var, &c->s->uniforms) {
1376                 uint32_t vec4_count = glsl_count_attribute_slots(var->type,
1377                                                                  false);
1378                 unsigned vec4_size = 4 * sizeof(float);
1379 
1380                 declare_uniform_range(c, var->data.driver_location * vec4_size,
1381                                       vec4_count * vec4_size);
1382 
1383         }
1384 }
1385 
1386 /**
1387  * Sets up the mapping from nir_register to struct qreg *.
1388  *
1389  * Each nir_register gets a struct qreg per 32-bit component being stored.
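 *
 * For example (a sketch of the layout): a register with num_components == 2
 * and num_array_elems == 3 is backed by 6 temporaries, addressed as
 * qregs[array_index * num_components + component].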
1390  */
1391 static void
1392 ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
1393 {
1394         foreach_list_typed(nir_register, nir_reg, node, list) {
1395                 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1396                 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1397                                                   array_len *
1398                                                   nir_reg->num_components);
1399 
1400                 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1401 
1402                 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1403                         qregs[i] = vir_get_temp(c);
1404         }
1405 }
1406 
1407 static void
1408 ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
1409 {
1410         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1411         for (int i = 0; i < instr->def.num_components; i++)
1412                 qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);
1413 
1414         _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1415 }
1416 
1417 static void
1418 ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
1419 {
1420         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1421 
1422         /* VIR needs there to be *some* value, so pick 0 (same as for
1423          * ntq_setup_registers()).
1424          */
1425         for (int i = 0; i < instr->def.num_components; i++)
1426                 qregs[i] = vir_uniform_ui(c, 0);
1427 }
1428 
1429 static void
1430 ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
1431 {
1432         nir_const_value *const_offset;
1433         unsigned offset;
1434 
1435         switch (instr->intrinsic) {
1436         case nir_intrinsic_load_uniform:
1437                 assert(instr->num_components == 1);
1438                 const_offset = nir_src_as_const_value(instr->src[0]);
1439                 if (const_offset) {
1440                         offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1441                         assert(offset % 4 == 0);
1442                         /* We need dwords */
1443                         offset = offset / 4;
1444                         ntq_store_dest(c, &instr->dest, 0,
1445                                        vir_uniform(c, QUNIFORM_UNIFORM,
1446                                                    offset));
1447                 } else {
1448                         ntq_store_dest(c, &instr->dest, 0,
1449                                        indirect_uniform_load(c, instr));
1450                 }
1451                 break;
1452 
1453         case nir_intrinsic_load_ubo:
1454                 for (int i = 0; i < instr->num_components; i++) {
1455                         int ubo = nir_src_as_const_value(instr->src[0])->u32[0];
1456 
1457                         /* Adjust for where we stored the TGSI register base. */
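                        /* Writing the address to the TMUA magic waddr kicks
                         * off a TMU general load; the THRSW gives other
                         * threads a chance to run while the fetch is
                         * outstanding, and LDTMU collects the returned word.
                         */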
1458                         vir_ADD_dest(c,
1459                                      vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
1460                                      vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
1461                                      vir_ADD(c,
1462                                              ntq_get_src(c, instr->src[1], 0),
1463                                              vir_uniform_ui(c, i * 4)));
1464 
1465                         vir_emit_thrsw(c);
1466 
1467                         ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
1468                 }
1469                 break;
1470 
1486         case nir_intrinsic_load_user_clip_plane:
1487                 for (int i = 0; i < instr->num_components; i++) {
1488                         ntq_store_dest(c, &instr->dest, i,
1489                                        vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1490                                                    nir_intrinsic_ucp_id(instr) *
1491                                                    4 + i));
1492                 }
1493                 break;
1494 
1495         case nir_intrinsic_load_alpha_ref_float:
1496                 ntq_store_dest(c, &instr->dest, 0,
1497                                vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1498                 break;
1499 
1500         case nir_intrinsic_load_sample_mask_in:
1501                 ntq_store_dest(c, &instr->dest, 0,
1502                                vir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1503                 break;
1504 
1505         case nir_intrinsic_load_front_face:
1506                 /* The register contains 0 (front) or 1 (back), and we need to
1507                  * turn it into a NIR bool where true means front.
1508                  */
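                /* REVF reads 0 for front-facing, so -1 + 0 = ~0 (NIR true);
                 * for back-facing, -1 + 1 = 0 (false).
                 */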
1509                 ntq_store_dest(c, &instr->dest, 0,
1510                                vir_ADD(c,
1511                                        vir_uniform_ui(c, -1),
1512                                        vir_REVF(c)));
1513                 break;
1514 
1515         case nir_intrinsic_load_instance_id:
1516                 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
1517                 break;
1518 
1519         case nir_intrinsic_load_vertex_id:
1520                 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
1521                 break;
1522 
1523         case nir_intrinsic_load_input:
1524                 const_offset = nir_src_as_const_value(instr->src[0]);
1525                 assert(const_offset && "v3d doesn't support indirect inputs");
1526                 for (int i = 0; i < instr->num_components; i++) {
1527                         offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1528                         int comp = nir_intrinsic_component(instr) + i;
1529                         ntq_store_dest(c, &instr->dest, i,
1530                                        vir_MOV(c, c->inputs[offset * 4 + comp]));
1531                 }
1532                 break;
1533 
1534         case nir_intrinsic_store_output:
1535                 const_offset = nir_src_as_const_value(instr->src[1]);
1536                 assert(const_offset && "v3d doesn't support indirect outputs");
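                /* Outputs get 4 scalar slots per driver_location, so the
                 * flat index is (base + const_offset) * 4 + component.
                 */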
1537                 offset = ((nir_intrinsic_base(instr) +
1538                            const_offset->u32[0]) * 4 +
1539                           nir_intrinsic_component(instr));
1540 
1541                 for (int i = 0; i < instr->num_components; i++) {
1542                         c->outputs[offset + i] =
1543                                 vir_MOV(c, ntq_get_src(c, instr->src[0], i));
1544                 }
1545                 c->num_outputs = MAX2(c->num_outputs,
1546                                       offset + instr->num_components);
1547                 break;
1548 
1549         case nir_intrinsic_discard:
1550                 if (c->execute.file != QFILE_NULL) {
1551                         vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1552                         vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1553                                                      vir_uniform_ui(c, 0)),
1554                                 V3D_QPU_COND_IFA);
1555                 } else {
1556                         vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1557                                         vir_uniform_ui(c, 0));
1558                 }
1559                 break;
1560 
1561         case nir_intrinsic_discard_if: {
1562                 /* true (~0) if we're discarding */
1563                 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1564 
1565                 if (c->execute.file != QFILE_NULL) {
1566                         /* execute == 0 means the channel is active.  Invert
1567                          * the condition so that we can use zero as "executing
1568                          * and discarding."
1569                          */
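                        /* OR(execute, ~cond) is zero only for channels that
                         * are both active (execute == 0) and discarding
                         * (cond == ~0), so PUSHZ sets the A flag for exactly
                         * those channels.
                         */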
1570                         vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
1571                                V3D_QPU_PF_PUSHZ);
1572                         vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1573                                                      vir_uniform_ui(c, 0)),
1574                                      V3D_QPU_COND_IFA);
1575                 } else {
1576                         vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
1577                         vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1578                                                      vir_uniform_ui(c, 0)),
1579                                      V3D_QPU_COND_IFNA);
1580                 }
1581 
1582                 break;
1583         }
1584 
1585         default:
1586                 fprintf(stderr, "Unknown intrinsic: ");
1587                 nir_print_instr(&instr->instr, stderr);
1588                 fprintf(stderr, "\n");
1589                 break;
1590         }
1591 }
1592 
1593 /* Clears (activates) the execute flags for any channels whose jump target
1594  * matches this block.
1595  */
1596 static void
1597 ntq_activate_execute_for_block(struct v3d_compile *c)
1598 {
1599         vir_PF(c, vir_XOR(c, c->execute, vir_uniform_ui(c, c->cur_block->index)),
1600                V3D_QPU_PF_PUSHZ);
1601 
1602         vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
1603 }
1604 
1605 static void
1606 ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
1607 {
1608         nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1609         bool empty_else_block =
1610                 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1611                  exec_list_is_empty(&nir_else_block->instr_list));
1612 
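        /* A sketch of the structure emitted below (each channel carries its
         * rejoin target in "execute"; 0 means currently active):
         *
         *       execute = else_block->index for active, condition-false channels
         *       branch allna -> else_block    ; skip THEN if nothing active
         *   then_block:
         *       ...
         *       execute = after_block->index for remaining active channels
         *   else_block:
         *       ...
         *   after_block:
         */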
1613         struct qblock *then_block = vir_new_block(c);
1614         struct qblock *after_block = vir_new_block(c);
1615         struct qblock *else_block;
1616         if (empty_else_block)
1617                 else_block = after_block;
1618         else
1619                 else_block = vir_new_block(c);
1620 
1621         bool was_top_level = false;
1622         if (c->execute.file == QFILE_NULL) {
1623                 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
1624                 was_top_level = true;
1625         }
1626 
1627         /* Set A for executing (execute == 0) and jumping (if->condition ==
1628          * 0) channels, and then update execute flags for those to point to
1629          * the ELSE block.
1630          */
1631         vir_PF(c, vir_OR(c,
1632                          c->execute,
1633                          ntq_get_src(c, if_stmt->condition, 0)),
1634                 V3D_QPU_PF_PUSHZ);
1635         vir_MOV_cond(c, V3D_QPU_COND_IFA,
1636                      c->execute,
1637                      vir_uniform_ui(c, else_block->index));
1638 
1639         /* Jump to ELSE if nothing is active for THEN, otherwise fall
1640          * through.
1641          */
1642         vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1643         vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
1644         vir_link_blocks(c->cur_block, else_block);
1645         vir_link_blocks(c->cur_block, then_block);
1646 
1647         /* Process the THEN block. */
1648         vir_set_emit_block(c, then_block);
1649         ntq_emit_cf_list(c, &if_stmt->then_list);
1650 
1651         if (!empty_else_block) {
1652                 /* Handle the end of the THEN block.  First, all currently
1653                  * active channels update their execute flags to point to
1654                  * ENDIF
1655                  */
1656                 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1657                 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1658                              vir_uniform_ui(c, after_block->index));
1659 
1660                 /* If everything points at ENDIF, then jump there immediately. */
1661                 vir_PF(c, vir_XOR(c, c->execute,
1662                                   vir_uniform_ui(c, after_block->index)),
1663                        V3D_QPU_PF_PUSHZ);
1664                 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
1665                 vir_link_blocks(c->cur_block, after_block);
1666                 vir_link_blocks(c->cur_block, else_block);
1667 
1668                 vir_set_emit_block(c, else_block);
1669                 ntq_activate_execute_for_block(c);
1670                 ntq_emit_cf_list(c, &if_stmt->else_list);
1671         }
1672 
1673         vir_link_blocks(c->cur_block, after_block);
1674 
1675         vir_set_emit_block(c, after_block);
1676         if (was_top_level)
1677                 c->execute = c->undef;
1678         else
1679                 ntq_activate_execute_for_block(c);
1680 }
1681 
1682 static void
1683 ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
1684 {
1685         switch (jump->type) {
1686         case nir_jump_break:
1687                 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1688                 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1689                              vir_uniform_ui(c, c->loop_break_block->index));
1690                 break;
1691 
1692         case nir_jump_continue:
1693                 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1694                 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1695                              vir_uniform_ui(c, c->loop_cont_block->index));
1696                 break;
1697 
1698         case nir_jump_return:
1699                 unreachable("All returns should be lowered\n");
1700         }
1701 }
1702 
1703 static void
1704 ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
1705 {
1706         switch (instr->type) {
1707         case nir_instr_type_alu:
1708                 ntq_emit_alu(c, nir_instr_as_alu(instr));
1709                 break;
1710 
1711         case nir_instr_type_intrinsic:
1712                 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
1713                 break;
1714 
1715         case nir_instr_type_load_const:
1716                 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
1717                 break;
1718 
1719         case nir_instr_type_ssa_undef:
1720                 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
1721                 break;
1722 
1723         case nir_instr_type_tex:
1724                 ntq_emit_tex(c, nir_instr_as_tex(instr));
1725                 break;
1726 
1727         case nir_instr_type_jump:
1728                 ntq_emit_jump(c, nir_instr_as_jump(instr));
1729                 break;
1730 
1731         default:
1732                 fprintf(stderr, "Unknown NIR instr type: ");
1733                 nir_print_instr(instr, stderr);
1734                 fprintf(stderr, "\n");
1735                 abort();
1736         }
1737 }
1738 
1739 static void
1740 ntq_emit_block(struct v3d_compile *c, nir_block *block)
1741 {
1742         nir_foreach_instr(instr, block) {
1743                 ntq_emit_instr(c, instr);
1744         }
1745 }
1746 
1747 static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
1748 
1749 static void
1750 ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
1751 {
1752         bool was_top_level = false;
1753         if (c->execute.file == QFILE_NULL) {
1754                 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
1755                 was_top_level = true;
1756         }
1757 
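        /* A sketch of the structure emitted below:
         *
         *   loop_cont_block:
         *       re-activate channels whose execute names this block
         *       <body>
         *       branch anya -> loop_cont_block  ; loop while any channel active
         *   loop_break_block:
         */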
1758         struct qblock *save_loop_cont_block = c->loop_cont_block;
1759         struct qblock *save_loop_break_block = c->loop_break_block;
1760 
1761         c->loop_cont_block = vir_new_block(c);
1762         c->loop_break_block = vir_new_block(c);
1763 
1764         vir_link_blocks(c->cur_block, c->loop_cont_block);
1765         vir_set_emit_block(c, c->loop_cont_block);
1766         ntq_activate_execute_for_block(c);
1767 
1768         ntq_emit_cf_list(c, &loop->body);
1769 
1770         /* Re-enable any previous continues now, so our ANYA check below
1771          * works.
1772          *
1773          * XXX: Use the .ORZ flags update, instead.
1774          */
1775         vir_PF(c, vir_XOR(c,
1776                           c->execute,
1777                           vir_uniform_ui(c, c->loop_cont_block->index)),
1778                V3D_QPU_PF_PUSHZ);
1779         vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
1780 
1781         vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1782 
1783         struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
1784         /* Pixels that were not dispatched or have been discarded should not
1785          * contribute to looping again.
1786          */
1787         branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
1788         vir_link_blocks(c->cur_block, c->loop_cont_block);
1789         vir_link_blocks(c->cur_block, c->loop_break_block);
1790 
1791         vir_set_emit_block(c, c->loop_break_block);
1792         if (was_top_level)
1793                 c->execute = c->undef;
1794         else
1795                 ntq_activate_execute_for_block(c);
1796 
1797         c->loop_break_block = save_loop_break_block;
1798         c->loop_cont_block = save_loop_cont_block;
1799 }
1800 
1801 static void
1802 ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
1803 {
1804         fprintf(stderr, "FUNCTIONS not handled.\n");
1805         abort();
1806 }
1807 
1808 static void
1809 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
1810 {
1811         foreach_list_typed(nir_cf_node, node, node, list) {
1812                 switch (node->type) {
1813                 case nir_cf_node_block:
1814                         ntq_emit_block(c, nir_cf_node_as_block(node));
1815                         break;
1816 
1817                 case nir_cf_node_if:
1818                         ntq_emit_if(c, nir_cf_node_as_if(node));
1819                         break;
1820 
1821                 case nir_cf_node_loop:
1822                         ntq_emit_loop(c, nir_cf_node_as_loop(node));
1823                         break;
1824 
1825                 case nir_cf_node_function:
1826                         ntq_emit_function(c, nir_cf_node_as_function(node));
1827                         break;
1828 
1829                 default:
1830                         fprintf(stderr, "Unknown NIR node type\n");
1831                         abort();
1832                 }
1833         }
1834 }
1835 
1836 static void
1837 ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
1838 {
1839         ntq_setup_registers(c, &impl->registers);
1840         ntq_emit_cf_list(c, &impl->body);
1841 }
1842 
1843 static void
1844 nir_to_vir(struct v3d_compile *c)
1845 {
1846         if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
1847                 c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
1848                 c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
1849                 c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
1850 
1851                 if (c->fs_key->is_points) {
1852                         c->point_x = emit_fragment_varying(c, NULL, 0);
1853                         c->point_y = emit_fragment_varying(c, NULL, 0);
1854                 } else if (c->fs_key->is_lines) {
1855                         c->line_x = emit_fragment_varying(c, NULL, 0);
1856                 }
1857         }
1858 
1859         ntq_setup_inputs(c);
1860         ntq_setup_outputs(c);
1861         ntq_setup_uniforms(c);
1862         ntq_setup_registers(c, &c->s->registers);
1863 
1864         /* Find the main function and emit the body. */
1865         nir_foreach_function(function, c->s) {
1866                 assert(strcmp(function->name, "main") == 0);
1867                 assert(function->impl);
1868                 ntq_emit_impl(c, function->impl);
1869         }
1870 }
1871 
1872 const nir_shader_compiler_options v3d_nir_options = {
1873         .lower_extract_byte = true,
1874         .lower_extract_word = true,
1875         .lower_bitfield_insert = true,
1876         .lower_bitfield_extract = true,
1877         .lower_pack_unorm_2x16 = true,
1878         .lower_pack_snorm_2x16 = true,
1879         .lower_pack_unorm_4x8 = true,
1880         .lower_pack_snorm_4x8 = true,
1881         .lower_unpack_unorm_4x8 = true,
1882         .lower_unpack_snorm_4x8 = true,
1883         .lower_fdiv = true,
1884         .lower_ffma = true,
1885         .lower_flrp32 = true,
1886         .lower_fpow = true,
1887         .lower_fsat = true,
1888         .lower_fsqrt = true,
1889         .native_integers = true,
1890 };
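/* These options are consumed when the NIR for this backend is built.  As an
 * illustrative sketch only (the exact entry point depends on the state
 * tracker feeding the compiler):
 *
 *     nir_shader *s = nir_shader_create(mem_ctx, MESA_SHADER_FRAGMENT,
 *                                       &v3d_nir_options, NULL);
 */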
1891 
1892 
1893 #if 0
1894 static int
1895 count_nir_instrs(nir_shader *nir)
1896 {
1897         int count = 0;
1898         nir_foreach_function(function, nir) {
1899                 if (!function->impl)
1900                         continue;
1901                 nir_foreach_block(block, function->impl) {
1902                         nir_foreach_instr(instr, block)
1903                                 count++;
1904                 }
1905         }
1906         return count;
1907 }
1908 #endif
1909 
1910 /**
1911  * Removes the THRSW instructions when demoting a shader down to
1912  * single-threaded (one will still be inserted at v3d_vir_to_qpu() for
1913  * the program end).
1914  */
1915 static void
1916 vir_remove_thrsw(struct v3d_compile *c)
1917 {
1918         vir_for_each_block(block, c) {
1919                 vir_for_each_inst_safe(inst, block) {
1920                         if (inst->qpu.sig.thrsw)
1921                                 vir_remove_instruction(c, inst);
1922                 }
1923         }
1924 
1925         c->last_thrsw = NULL;
1926 }
1927 
1928 static void
1929 vir_emit_last_thrsw(struct v3d_compile *c)
1930 {
1931         /* On V3D before 4.1, we need a TMU op to be outstanding when thread
1932          * switching, so disable threads if we didn't do any TMU ops (each of
1933          * which would have emitted a THRSW).
1934          */
1935         if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
1936                 c->threads = 1;
1937                 if (c->last_thrsw)
1938                         vir_remove_thrsw(c);
1939                 return;
1940         }
1941 
1942         /* If we're threaded and the last THRSW was in conditional code, then
1943          * we need to emit another one so that we can flag it as the last
1944          * thrsw.
1945          */
1946         if (c->last_thrsw && !c->last_thrsw_at_top_level) {
1947                 assert(c->devinfo->ver >= 41);
1948                 vir_emit_thrsw(c);
1949         }
1950 
1951         /* If we're threaded, then we need to mark the last THRSW instruction
1952          * so we can emit a pair of them at QPU emit time.
1953          *
1954          * For V3D 4.x, we can spawn the non-fragment shaders already in the
1955          * post-last-THRSW state, so we can skip this.
1956          */
1957         if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
1958                 assert(c->devinfo->ver >= 41);
1959                 vir_emit_thrsw(c);
1960         }
1961 
1962         if (c->last_thrsw)
1963                 c->last_thrsw->is_last_thrsw = true;
1964 }
1965 
1966 void
1967 v3d_nir_to_vir(struct v3d_compile *c)
1968 {
1969         if (V3D_DEBUG & (V3D_DEBUG_NIR |
1970                          v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
1971                 fprintf(stderr, "%s prog %d/%d NIR:\n",
1972                         vir_get_stage_name(c),
1973                         c->program_id, c->variant_id);
1974                 nir_print_shader(c->s, stderr);
1975         }
1976 
1977         nir_to_vir(c);
1978 
1979         /* Emit the last THRSW before STVPM and TLB writes. */
1980         vir_emit_last_thrsw(c);
1981 
1982         switch (c->s->info.stage) {
1983         case MESA_SHADER_FRAGMENT:
1984                 emit_frag_end(c);
1985                 break;
1986         case MESA_SHADER_VERTEX:
1987                 emit_vert_end(c);
1988                 break;
1989         default:
1990                 unreachable("bad stage");
1991         }
1992 
1993         if (V3D_DEBUG & (V3D_DEBUG_VIR |
1994                          v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
1995                 fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
1996                         vir_get_stage_name(c),
1997                         c->program_id, c->variant_id);
1998                 vir_dump(c);
1999                 fprintf(stderr, "\n");
2000         }
2001 
2002         vir_optimize(c);
2003         vir_lower_uniforms(c);
2004 
2005         /* XXX: vir_schedule_instructions(c); */
2006 
2007         if (V3D_DEBUG & (V3D_DEBUG_VIR |
2008                          v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2009                 fprintf(stderr, "%s prog %d/%d VIR:\n",
2010                         vir_get_stage_name(c),
2011                         c->program_id, c->variant_id);
2012                 vir_dump(c);
2013                 fprintf(stderr, "\n");
2014         }
2015 
2016         /* Compute the live ranges so we can figure out interference. */
2017         vir_calculate_live_intervals(c);
2018 
2019         /* Attempt to allocate registers for the temporaries.  If we fail,
2020          * reduce thread count and try again.
2021          */
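        /* The QPU's physical register file is shared among its threads, so
         * each halving of the thread count frees up more registers per
         * thread for the allocator to use.
         */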
2022         int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
2023         struct qpu_reg *temp_registers;
2024         while (true) {
2025                 temp_registers = v3d_register_allocate(c);
2026 
2027                 if (temp_registers)
2028                         break;
2029 
2030                 if (c->threads == min_threads) {
2031                         fprintf(stderr, "Failed to register allocate at %d threads:\n",
2032                                 c->threads);
2033                         vir_dump(c);
2034                         c->failed = true;
2035                         return;
2036                 }
2037 
2038                 c->threads /= 2;
2039 
2040                 if (c->threads == 1)
2041                         vir_remove_thrsw(c);
2042         }
2043 
2044         v3d_vir_to_qpu(c, temp_registers);
2045 }
2046