1 /*
2  * Copyright © 2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "brw_vec4.h"
25 #include "brw_fs.h"
26 #include "brw_cfg.h"
27 #include "brw_vs.h"
28 #include "brw_nir.h"
29 #include "brw_vec4_builder.h"
30 #include "brw_vec4_live_variables.h"
31 #include "brw_dead_control_flow.h"
32 #include "program/prog_parameter.h"
33 
34 #define MAX_INSTRUCTION (1 << 30)
35 
36 using namespace brw;
37 
38 namespace brw {
39 
40 void
41 src_reg::init()
42 {
43    memset(this, 0, sizeof(*this));
44 
45    this->file = BAD_FILE;
46 }
47 
48 src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
49 {
50    init();
51 
52    this->file = file;
53    this->nr = nr;
54    if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
55       this->swizzle = brw_swizzle_for_size(type->vector_elements);
56    else
57       this->swizzle = BRW_SWIZZLE_XYZW;
58    if (type)
59       this->type = brw_type_for_base_type(type);
60 }
61 
62 /** Generic unset register constructor. */
63 src_reg::src_reg()
64 {
65    init();
66 }
67 
68 src_reg::src_reg(struct ::brw_reg reg) :
69    backend_reg(reg)
70 {
71    this->offset = 0;
72    this->reladdr = NULL;
73 }
74 
75 src_reg::src_reg(const dst_reg &reg) :
76    backend_reg(reg)
77 {
78    this->reladdr = reg.reladdr;
79    this->swizzle = brw_swizzle_for_mask(reg.writemask);
80 }
81 
82 void
83 dst_reg::init()
84 {
85    memset(this, 0, sizeof(*this));
86    this->file = BAD_FILE;
87    this->writemask = WRITEMASK_XYZW;
88 }
89 
90 dst_reg::dst_reg()
91 {
92    init();
93 }
94 
95 dst_reg::dst_reg(enum brw_reg_file file, int nr)
96 {
97    init();
98 
99    this->file = file;
100    this->nr = nr;
101 }
102 
103 dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
104                  unsigned writemask)
105 {
106    init();
107 
108    this->file = file;
109    this->nr = nr;
110    this->type = brw_type_for_base_type(type);
111    this->writemask = writemask;
112 }
113 
114 dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
115                  unsigned writemask)
116 {
117    init();
118 
119    this->file = file;
120    this->nr = nr;
121    this->type = type;
122    this->writemask = writemask;
123 }
124 
125 dst_reg::dst_reg(struct ::brw_reg reg) :
126    backend_reg(reg)
127 {
128    this->offset = 0;
129    this->reladdr = NULL;
130 }
131 
132 dst_reg::dst_reg(const src_reg &reg) :
133    backend_reg(reg)
134 {
135    this->writemask = brw_mask_for_swizzle(reg.swizzle);
136    this->reladdr = reg.reladdr;
137 }
138 
139 bool
140 dst_reg::equals(const dst_reg &r) const
141 {
142    return (this->backend_reg::equals(r) &&
143            (reladdr == r.reladdr ||
144             (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
145 }
146 
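/**
 * Returns true for virtual opcodes that are compiled to SEND messages whose
 * payload is sourced from the GRF rather than from MRFs.
 */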
147 bool
148 vec4_instruction::is_send_from_grf()
149 {
150    switch (opcode) {
151    case SHADER_OPCODE_SHADER_TIME_ADD:
152    case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
153    case SHADER_OPCODE_UNTYPED_ATOMIC:
154    case SHADER_OPCODE_UNTYPED_SURFACE_READ:
155    case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
156    case SHADER_OPCODE_TYPED_ATOMIC:
157    case SHADER_OPCODE_TYPED_SURFACE_READ:
158    case SHADER_OPCODE_TYPED_SURFACE_WRITE:
159    case VEC4_OPCODE_URB_READ:
160    case TCS_OPCODE_URB_WRITE:
161    case TCS_OPCODE_RELEASE_INPUT:
162    case SHADER_OPCODE_BARRIER:
163       return true;
164    default:
165       return false;
166    }
167 }
168 
169 /**
170  * Returns true if this instruction's sources and destinations cannot
171  * safely be the same register.
172  *
173  * In most cases, a register can be written over safely by the same
174  * instruction that is its last use.  For a single instruction, the
175  * sources are dereferenced before writing of the destination starts
176  * (naturally).
177  *
178  * However, there are a few cases where this can be problematic:
179  *
180  * - Virtual opcodes that translate to multiple instructions in the
181  *   code generator: if src == dst and one instruction writes the
182  *   destination before a later instruction reads the source, then
183  *   src will have been clobbered.
184  *
185  * The register allocator uses this information to set up conflicts between
186  * GRF sources and the destination.
187  */
188 bool
189 vec4_instruction::has_source_and_destination_hazard() const
190 {
191    switch (opcode) {
192    case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
193    case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
194    case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
195       return true;
196    default:
197       /* 8-wide compressed DF operations are executed as two 4-wide operations,
198        * so we have a src/dst hazard if the first half of the instruction
199        * overwrites the source of the second half. Prevent this by marking
200        * compressed instructions as having src/dst hazards, so the register
201        * allocator assigns safe register regions for dst and srcs.
202        */
203       return size_written > REG_SIZE;
204    }
205 }
206 
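/**
 * Returns the number of bytes read from source slot \p arg.  For the
 * send-like opcodes below the payload source covers the whole message, so
 * the size read is mlen full registers rather than a single vec4.
 */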
207 unsigned
208 vec4_instruction::size_read(unsigned arg) const
209 {
210    switch (opcode) {
211    case SHADER_OPCODE_SHADER_TIME_ADD:
212    case SHADER_OPCODE_UNTYPED_ATOMIC:
213    case SHADER_OPCODE_UNTYPED_SURFACE_READ:
214    case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
215    case SHADER_OPCODE_TYPED_ATOMIC:
216    case SHADER_OPCODE_TYPED_SURFACE_READ:
217    case SHADER_OPCODE_TYPED_SURFACE_WRITE:
218    case TCS_OPCODE_URB_WRITE:
219       if (arg == 0)
220          return mlen * REG_SIZE;
221       break;
222    case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
223       if (arg == 1)
224          return mlen * REG_SIZE;
225       break;
226    default:
227       break;
228    }
229 
230    switch (src[arg].file) {
231    case BAD_FILE:
232       return 0;
233    case IMM:
234    case UNIFORM:
235       return 4 * type_sz(src[arg].type);
236    default:
237       /* XXX - Represent actual vertical stride. */
238       return exec_size * type_sz(src[arg].type);
239    }
240 }
241 
242 bool
243 vec4_instruction::can_do_source_mods(const struct gen_device_info *devinfo)
244 {
245    if (devinfo->gen == 6 && is_math())
246       return false;
247 
248    if (is_send_from_grf())
249       return false;
250 
251    if (!backend_instruction::can_do_source_mods())
252       return false;
253 
254    return true;
255 }
256 
257 bool
258 vec4_instruction::can_do_writemask(const struct gen_device_info *devinfo)
259 {
260    switch (opcode) {
261    case SHADER_OPCODE_GEN4_SCRATCH_READ:
262    case VEC4_OPCODE_FROM_DOUBLE:
263    case VEC4_OPCODE_TO_DOUBLE:
264    case VEC4_OPCODE_PICK_LOW_32BIT:
265    case VEC4_OPCODE_PICK_HIGH_32BIT:
266    case VEC4_OPCODE_SET_LOW_32BIT:
267    case VEC4_OPCODE_SET_HIGH_32BIT:
268    case VS_OPCODE_PULL_CONSTANT_LOAD:
269    case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
270    case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
271    case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
272    case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
273    case TES_OPCODE_CREATE_INPUT_READ_HEADER:
274    case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
275    case VEC4_OPCODE_URB_READ:
276    case SHADER_OPCODE_MOV_INDIRECT:
277       return false;
278    default:
279       /* The MATH instruction on Gen6 only executes in align1 mode, which does
280        * not support writemasking.
281        */
282       if (devinfo->gen == 6 && is_math())
283          return false;
284 
285       if (is_tex())
286          return false;
287 
288       return true;
289    }
290 }
291 
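/**
 * Register types may only be changed on raw MOVs (or predicated SELs whose
 * second source already matches the destination type), since source
 * modifiers and saturate are interpreted according to the register type.
 */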
292 bool
293 vec4_instruction::can_change_types() const
294 {
295    return dst.type == src[0].type &&
296           !src[0].abs && !src[0].negate && !saturate &&
297           (opcode == BRW_OPCODE_MOV ||
298            (opcode == BRW_OPCODE_SEL &&
299             dst.type == src[1].type &&
300             predicate != BRW_PREDICATE_NONE &&
301             !src[1].abs && !src[1].negate));
302 }
303 
304 /**
305  * Returns how many MRFs an opcode will write over.
306  *
307  * Note that this is not the 0 or 1 implied MRF writes in an actual gen
308  * instruction -- the generate_* functions generate additional MOVs
309  * for setup.
310  */
311 int
312 vec4_visitor::implied_mrf_writes(vec4_instruction *inst)
313 {
314    if (inst->mlen == 0 || inst->is_send_from_grf())
315       return 0;
316 
317    switch (inst->opcode) {
318    case SHADER_OPCODE_RCP:
319    case SHADER_OPCODE_RSQ:
320    case SHADER_OPCODE_SQRT:
321    case SHADER_OPCODE_EXP2:
322    case SHADER_OPCODE_LOG2:
323    case SHADER_OPCODE_SIN:
324    case SHADER_OPCODE_COS:
325       return 1;
326    case SHADER_OPCODE_INT_QUOTIENT:
327    case SHADER_OPCODE_INT_REMAINDER:
328    case SHADER_OPCODE_POW:
329    case TCS_OPCODE_THREAD_END:
330       return 2;
331    case VS_OPCODE_URB_WRITE:
332       return 1;
333    case VS_OPCODE_PULL_CONSTANT_LOAD:
334       return 2;
335    case SHADER_OPCODE_GEN4_SCRATCH_READ:
336       return 2;
337    case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
338       return 3;
339    case GS_OPCODE_URB_WRITE:
340    case GS_OPCODE_URB_WRITE_ALLOCATE:
341    case GS_OPCODE_THREAD_END:
342       return 0;
343    case GS_OPCODE_FF_SYNC:
344       return 1;
345    case TCS_OPCODE_URB_WRITE:
346       return 0;
347    case SHADER_OPCODE_SHADER_TIME_ADD:
348       return 0;
349    case SHADER_OPCODE_TEX:
350    case SHADER_OPCODE_TXL:
351    case SHADER_OPCODE_TXD:
352    case SHADER_OPCODE_TXF:
353    case SHADER_OPCODE_TXF_CMS:
354    case SHADER_OPCODE_TXF_CMS_W:
355    case SHADER_OPCODE_TXF_MCS:
356    case SHADER_OPCODE_TXS:
357    case SHADER_OPCODE_TG4:
358    case SHADER_OPCODE_TG4_OFFSET:
359    case SHADER_OPCODE_SAMPLEINFO:
360    case VS_OPCODE_GET_BUFFER_SIZE:
361       return inst->header_size;
362    default:
363       unreachable("not reached");
364    }
365 }
366 
367 bool
368 src_reg::equals(const src_reg &r) const
369 {
370    return (this->backend_reg::equals(r) &&
371 	   !reladdr && !r.reladdr);
372 }
373 
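/**
 * Looks for sequences of scalar MOVs of immediates into different channels
 * of the same register and, when the values are representable in the 8-bit
 * VF format, combines them into a single MOV of a vector-float immediate:
 *
 *    mov vgrf3.x:F, 1.0F
 *    mov vgrf3.y:F, 2.0F     ->    mov vgrf3.xy:F, [1F, 2F, 0F, 0F]VF
 */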
374 bool
375 vec4_visitor::opt_vector_float()
376 {
377    bool progress = false;
378 
379    foreach_block(block, cfg) {
380       int last_reg = -1, last_offset = -1;
381       enum brw_reg_file last_reg_file = BAD_FILE;
382 
383       uint8_t imm[4] = { 0 };
384       int inst_count = 0;
385       vec4_instruction *imm_inst[4];
386       unsigned writemask = 0;
387       enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;
388 
389       foreach_inst_in_block_safe(vec4_instruction, inst, block) {
390          int vf = -1;
391          enum brw_reg_type need_type;
392 
393          /* Look for unconditional MOVs from an immediate with a partial
394           * writemask.  Skip type-conversion MOVs other than integer 0,
395           * where the type doesn't matter.  See if the immediate can be
396           * represented as a VF.
397           */
398          if (inst->opcode == BRW_OPCODE_MOV &&
399              inst->src[0].file == IMM &&
400              inst->predicate == BRW_PREDICATE_NONE &&
401              inst->dst.writemask != WRITEMASK_XYZW &&
402              type_sz(inst->src[0].type) < 8 &&
403              (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {
404 
405             vf = brw_float_to_vf(inst->src[0].d);
406             need_type = BRW_REGISTER_TYPE_D;
407 
408             if (vf == -1) {
409                vf = brw_float_to_vf(inst->src[0].f);
410                need_type = BRW_REGISTER_TYPE_F;
411             }
412          } else {
413             last_reg = -1;
414          }
415 
416          /* If this wasn't a MOV, or the destination register doesn't match,
417           * or we have to switch destination types, then this breaks our
418           * sequence.  Combine anything we've accumulated so far.
419           */
420          if (last_reg != inst->dst.nr ||
421              last_offset != inst->dst.offset ||
422              last_reg_file != inst->dst.file ||
423              (vf > 0 && dest_type != need_type)) {
424 
425             if (inst_count > 1) {
426                unsigned vf;
427                memcpy(&vf, imm, sizeof(vf));
428                vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
429                mov->dst.type = dest_type;
430                mov->dst.writemask = writemask;
431                inst->insert_before(block, mov);
432 
433                for (int i = 0; i < inst_count; i++) {
434                   imm_inst[i]->remove(block);
435                }
436 
437                progress = true;
438             }
439 
440             inst_count = 0;
441             last_reg = -1;
442             writemask = 0;
443             dest_type = BRW_REGISTER_TYPE_F;
444 
445             for (int i = 0; i < 4; i++) {
446                imm[i] = 0;
447             }
448          }
449 
450          /* Record this instruction's value (if it was representable). */
451          if (vf != -1) {
452             if ((inst->dst.writemask & WRITEMASK_X) != 0)
453                imm[0] = vf;
454             if ((inst->dst.writemask & WRITEMASK_Y) != 0)
455                imm[1] = vf;
456             if ((inst->dst.writemask & WRITEMASK_Z) != 0)
457                imm[2] = vf;
458             if ((inst->dst.writemask & WRITEMASK_W) != 0)
459                imm[3] = vf;
460 
461             writemask |= inst->dst.writemask;
462             imm_inst[inst_count++] = inst;
463 
464             last_reg = inst->dst.nr;
465             last_offset = inst->dst.offset;
466             last_reg_file = inst->dst.file;
467             if (vf > 0)
468                dest_type = need_type;
469          }
470       }
471    }
472 
473    if (progress)
474       invalidate_live_intervals();
475 
476    return progress;
477 }
478 
479 /* Replaces unused channels of a swizzle with channels that are used.
480  *
481  * For instance, this pass transforms
482  *
483  *    mov vgrf4.yz, vgrf5.wxzy
484  *
485  * into
486  *
487  *    mov vgrf4.yz, vgrf5.xxzx
488  *
489  * This eliminates false uses of some channels, letting dead code elimination
490  * remove the instructions that wrote them.
491  */
492 bool
493 vec4_visitor::opt_reduce_swizzle()
494 {
495    bool progress = false;
496 
497    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
498       if (inst->dst.file == BAD_FILE ||
499           inst->dst.file == ARF ||
500           inst->dst.file == FIXED_GRF ||
501           inst->is_send_from_grf())
502          continue;
503 
504       unsigned swizzle;
505 
506       /* Determine which channels of the sources are read. */
507       switch (inst->opcode) {
508       case VEC4_OPCODE_PACK_BYTES:
509       case BRW_OPCODE_DP4:
510       case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
511                             *           but all four of src1.
512                             */
513          swizzle = brw_swizzle_for_size(4);
514          break;
515       case BRW_OPCODE_DP3:
516          swizzle = brw_swizzle_for_size(3);
517          break;
518       case BRW_OPCODE_DP2:
519          swizzle = brw_swizzle_for_size(2);
520          break;
521 
522       case VEC4_OPCODE_TO_DOUBLE:
523       case VEC4_OPCODE_FROM_DOUBLE:
524       case VEC4_OPCODE_PICK_LOW_32BIT:
525       case VEC4_OPCODE_PICK_HIGH_32BIT:
526       case VEC4_OPCODE_SET_LOW_32BIT:
527       case VEC4_OPCODE_SET_HIGH_32BIT:
528          swizzle = brw_swizzle_for_size(4);
529          break;
530 
531       default:
532          swizzle = brw_swizzle_for_mask(inst->dst.writemask);
533          break;
534       }
535 
536       /* Update sources' swizzles. */
537       for (int i = 0; i < 3; i++) {
538          if (inst->src[i].file != VGRF &&
539              inst->src[i].file != ATTR &&
540              inst->src[i].file != UNIFORM)
541             continue;
542 
543          const unsigned new_swizzle =
544             brw_compose_swizzle(swizzle, inst->src[i].swizzle);
545          if (inst->src[i].swizzle != new_swizzle) {
546             inst->src[i].swizzle = new_swizzle;
547             progress = true;
548          }
549       }
550    }
551 
552    if (progress)
553       invalidate_live_intervals();
554 
555    return progress;
556 }
557 
558 void
559 vec4_visitor::split_uniform_registers()
560 {
561    /* Prior to this, uniforms have been in an array sized according to
562     * the number of vector uniforms present, sparsely filled (so an
563     * aggregate results in reg indices being skipped over).  Now we're
564     * going to cut those aggregates up so each .nr index is one
565     * vector.  The goal is to make elimination of unused uniform
566     * components easier later.
567     */
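   /* For example, a uniform at .nr = 2 accessed with a byte offset of 32
    * (the third vec4 of an aggregate) becomes .nr = 4 with offset 0.
    */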
568    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
569       for (int i = 0 ; i < 3; i++) {
570 	 if (inst->src[i].file != UNIFORM)
571 	    continue;
572 
573 	 assert(!inst->src[i].reladdr);
574 
575          inst->src[i].nr += inst->src[i].offset / 16;
576 	 inst->src[i].offset %= 16;
577       }
578    }
579 }
580 
581 void
582 vec4_visitor::pack_uniform_registers()
583 {
584    uint8_t chans_used[this->uniforms];
585    int new_loc[this->uniforms];
586    int new_chan[this->uniforms];
587 
588    memset(chans_used, 0, sizeof(chans_used));
589    memset(new_loc, 0, sizeof(new_loc));
590    memset(new_chan, 0, sizeof(new_chan));
591 
592    /* Find which uniform vectors are actually used by the program.  We
593     * expect unused vector elements when we've moved array access out
594     * to pull constants, and from some GLSL code generators like wine.
595     */
596    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
597       unsigned readmask;
598       switch (inst->opcode) {
599       case VEC4_OPCODE_PACK_BYTES:
600       case BRW_OPCODE_DP4:
601       case BRW_OPCODE_DPH:
602          readmask = 0xf;
603          break;
604       case BRW_OPCODE_DP3:
605          readmask = 0x7;
606          break;
607       case BRW_OPCODE_DP2:
608          readmask = 0x3;
609          break;
610       default:
611          readmask = inst->dst.writemask;
612          break;
613       }
614 
615       for (int i = 0 ; i < 3; i++) {
616          if (inst->src[i].file != UNIFORM)
617             continue;
618 
619          assert(type_sz(inst->src[i].type) % 4 == 0);
620          unsigned channel_size = type_sz(inst->src[i].type) / 4;
621 
622          int reg = inst->src[i].nr;
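         /* 64-bit types use two 32-bit slots per logical channel, so reading
          * a high channel of a double-precision uniform can push the usage
          * count into the following vec4 (chans_used[reg + 1]).
          */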
623          for (int c = 0; c < 4; c++) {
624             if (!(readmask & (1 << c)))
625                continue;
626 
627             unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
628             unsigned used = MAX2(chans_used[reg], channel * channel_size);
629             if (used <= 4)
630                chans_used[reg] = used;
631             else
632                chans_used[reg + 1] = used - 4;
633          }
634       }
635 
636       if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
637           inst->src[0].file == UNIFORM) {
638          assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
639          assert(inst->src[0].subnr == 0);
640 
641          unsigned bytes_read = inst->src[2].ud;
642          assert(bytes_read % 4 == 0);
643          unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);
644 
645          /* We just mark every register touched by a MOV_INDIRECT as being
646           * fully used.  This ensures that it doesn't get broken up piecewise by
647           * the next part of our packing algorithm.
648           */
649          int reg = inst->src[0].nr;
650          for (unsigned i = 0; i < vec4s_read; i++)
651             chans_used[reg + i] = 4;
652       }
653    }
654 
655    int new_uniform_count = 0;
656 
657    /* Now, figure out a packing of the live uniform vectors into our
658     * push constants.
659     */
660    for (int src = 0; src < uniforms; src++) {
661       int size = chans_used[src];
662 
663       if (size == 0)
664          continue;
665 
666       int dst;
667       /* Find the lowest place we can slot this uniform in. */
668       for (dst = 0; dst < src; dst++) {
669          if (chans_used[dst] + size <= 4)
670             break;
671       }
672 
673       if (src == dst) {
674          new_loc[src] = dst;
675          new_chan[src] = 0;
676       } else {
677          new_loc[src] = dst;
678          new_chan[src] = chans_used[dst];
679 
680          /* Move the references to the data */
681          for (int j = 0; j < size; j++) {
682             stage_prog_data->param[dst * 4 + new_chan[src] + j] =
683                stage_prog_data->param[src * 4 + j];
684          }
685 
686          chans_used[dst] += size;
687          chans_used[src] = 0;
688       }
689 
690       new_uniform_count = MAX2(new_uniform_count, dst + 1);
691    }
692 
693    this->uniforms = new_uniform_count;
694 
695    /* Now, update the instructions for our repacked uniforms. */
696    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
697       for (int i = 0 ; i < 3; i++) {
698          int src = inst->src[i].nr;
699 
700          if (inst->src[i].file != UNIFORM)
701             continue;
702 
703          inst->src[i].nr = new_loc[src];
704          inst->src[i].swizzle += BRW_SWIZZLE4(new_chan[src], new_chan[src],
705                                               new_chan[src], new_chan[src]);
706       }
707    }
708 }
709 
710 /**
711  * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
712  *
713  * While GLSL IR also performs this optimization, we end up with it in
714  * our instruction stream for a couple of reasons.  One is that we
715  * sometimes generate silly instructions, for example in array access
716  * where we'll generate "ADD offset, index, base" even if base is 0.
717  * The other is that GLSL IR's constant propagation doesn't track the
718  * components of aggregates, so some VS patterns (initialize matrix to
719  * 0, accumulate in vertex blending factors) end up breaking down to
720  * instructions involving 0.
721  */
722 bool
723 vec4_visitor::opt_algebraic()
724 {
725    bool progress = false;
726 
727    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
728       switch (inst->opcode) {
729       case BRW_OPCODE_MOV:
730          if (inst->src[0].file != IMM)
731             break;
732 
733          if (inst->saturate) {
734             if (inst->dst.type != inst->src[0].type)
735                assert(!"unimplemented: saturate mixed types");
736 
737             if (brw_saturate_immediate(inst->dst.type,
738                                        &inst->src[0].as_brw_reg())) {
739                inst->saturate = false;
740                progress = true;
741             }
742          }
743          break;
744 
745       case VEC4_OPCODE_UNPACK_UNIFORM:
746          if (inst->src[0].file != UNIFORM) {
747             inst->opcode = BRW_OPCODE_MOV;
748             progress = true;
749          }
750          break;
751 
752       case BRW_OPCODE_ADD:
753 	 if (inst->src[1].is_zero()) {
754 	    inst->opcode = BRW_OPCODE_MOV;
755 	    inst->src[1] = src_reg();
756 	    progress = true;
757 	 }
758 	 break;
759 
760       case BRW_OPCODE_MUL:
761 	 if (inst->src[1].is_zero()) {
762 	    inst->opcode = BRW_OPCODE_MOV;
763 	    switch (inst->src[0].type) {
764 	    case BRW_REGISTER_TYPE_F:
765 	       inst->src[0] = brw_imm_f(0.0f);
766 	       break;
767 	    case BRW_REGISTER_TYPE_D:
768 	       inst->src[0] = brw_imm_d(0);
769 	       break;
770 	    case BRW_REGISTER_TYPE_UD:
771 	       inst->src[0] = brw_imm_ud(0u);
772 	       break;
773 	    default:
774 	       unreachable("not reached");
775 	    }
776 	    inst->src[1] = src_reg();
777 	    progress = true;
778 	 } else if (inst->src[1].is_one()) {
779 	    inst->opcode = BRW_OPCODE_MOV;
780 	    inst->src[1] = src_reg();
781 	    progress = true;
782          } else if (inst->src[1].is_negative_one()) {
783             inst->opcode = BRW_OPCODE_MOV;
784             inst->src[0].negate = !inst->src[0].negate;
785             inst->src[1] = src_reg();
786             progress = true;
787 	 }
788 	 break;
789       case BRW_OPCODE_CMP:
790          if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
791              inst->src[0].abs &&
792              inst->src[0].negate &&
793              inst->src[1].is_zero()) {
794             inst->src[0].abs = false;
795             inst->src[0].negate = false;
796             inst->conditional_mod = BRW_CONDITIONAL_Z;
797             progress = true;
798             break;
799          }
800          break;
801       case SHADER_OPCODE_BROADCAST:
802          if (is_uniform(inst->src[0]) ||
803              inst->src[1].is_zero()) {
804             inst->opcode = BRW_OPCODE_MOV;
805             inst->src[1] = src_reg();
806             inst->force_writemask_all = true;
807             progress = true;
808          }
809          break;
810 
811       default:
812 	 break;
813       }
814    }
815 
816    if (progress)
817       invalidate_live_intervals();
818 
819    return progress;
820 }
821 
822 /**
823  * Only a limited number of hardware registers may be used for push
824  * constants, so this turns access to the overflowed constants into
825  * pull constants.
826  */
827 void
828 vec4_visitor::move_push_constants_to_pull_constants()
829 {
830    int pull_constant_loc[this->uniforms];
831 
832    /* Only allow 32 registers (256 uniform components) as push constants,
833     * which is the limit on gen6.
834     *
835     * If changing this value, note the limitation about total_regs in
836     * brw_curbe.c.
837     */
838    int max_uniform_components = 32 * 8;
839    if (this->uniforms * 4 <= max_uniform_components)
840       return;
841 
842    /* Make some sort of choice as to which uniforms get sent to pull
843     * constants.  We could potentially do something clever here like
844     * look for the most infrequently used uniform vec4s, but leave
845     * that for later.
846     */
847    for (int i = 0; i < this->uniforms * 4; i += 4) {
848       pull_constant_loc[i / 4] = -1;
849 
850       if (i >= max_uniform_components) {
851          const gl_constant_value **values = &stage_prog_data->param[i];
852 
853          /* Try to find an existing copy of this uniform in the pull
854           * constants if it was part of an array access already.
855           */
856          for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
857             int matches;
858 
859             for (matches = 0; matches < 4; matches++) {
860                if (stage_prog_data->pull_param[j + matches] != values[matches])
861                   break;
862             }
863 
864             if (matches == 4) {
865                pull_constant_loc[i / 4] = j / 4;
866                break;
867             }
868          }
869 
870          if (pull_constant_loc[i / 4] == -1) {
871             assert(stage_prog_data->nr_pull_params % 4 == 0);
872             pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;
873 
874             for (int j = 0; j < 4; j++) {
875                stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
876                   values[j];
877             }
878          }
879       }
880    }
881 
882    /* Now actually rewrite usage of the things we've moved to pull
883     * constants.
884     */
885    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
886       for (int i = 0 ; i < 3; i++) {
887          if (inst->src[i].file != UNIFORM ||
888              pull_constant_loc[inst->src[i].nr] == -1)
889             continue;
890 
891          int uniform = inst->src[i].nr;
892 
893          const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
894             glsl_type::dvec4_type : glsl_type::vec4_type;
895          dst_reg temp = dst_reg(this, temp_type);
896 
897          emit_pull_constant_load(block, inst, temp, inst->src[i],
898                                  pull_constant_loc[uniform], src_reg());
899 
900          inst->src[i].file = temp.file;
901          inst->src[i].nr = temp.nr;
902          inst->src[i].offset %= 16;
903          inst->src[i].reladdr = NULL;
904       }
905    }
906 
907    /* Repack push constants to remove the now-unused ones. */
908    pack_uniform_registers();
909 }
910 
911 /* Conditions for which we want to avoid setting the dependency control bits */
912 bool
913 vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
914 {
915 #define IS_DWORD(reg) \
916    (reg.type == BRW_REGISTER_TYPE_UD || \
917     reg.type == BRW_REGISTER_TYPE_D)
918 
919 #define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)
920 
921    /* From the Cherryview and Broadwell PRMs:
922     *
923     * "When source or destination datatype is 64b or operation is integer DWord
924     * multiply, DepCtrl must not be used."
925     *
926     * SKL PRMs don't include this restriction; however, gen7 seems to be
927     * affected, at least by the 64b restriction, since DepCtrl with double
928     * precision instructions seems to produce GPU hangs in some cases.
929     */
930    if (devinfo->gen == 8 || devinfo->is_broxton) {
931       if (inst->opcode == BRW_OPCODE_MUL &&
932          IS_DWORD(inst->src[0]) &&
933          IS_DWORD(inst->src[1]))
934          return true;
935    }
936 
937    if (devinfo->gen >= 7 && devinfo->gen <= 8) {
938       if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
939           IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
940       return true;
941    }
942 
943 #undef IS_64BIT
944 #undef IS_DWORD
945 
946    if (devinfo->gen >= 8) {
947       if (inst->opcode == BRW_OPCODE_F32TO16)
948          return true;
949    }
950 
951    /*
952     * mlen:
953     * In the presence of send messages, totally interrupt dependency
954     * control. They're long enough that the chance of dependency
955     * control around them just doesn't matter.
956     *
957     * predicate:
958     * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
959     * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
960     * completes the scoreboard clear must have a non-zero execution mask. This
961     * means, if any kind of predication can change the execution mask or channel
962     * enable of the last instruction, the optimization must be avoided. This is
963     * to avoid instructions being shot down the pipeline when no writes are
964     * required.
965     *
966     * math:
967     * Dependency control does not work well over math instructions.
968     * NB: Discovered empirically
969     */
970    return (inst->mlen || inst->predicate || inst->is_math());
971 }
972 
973 /**
974  * Sets the dependency control fields on instructions after register
975  * allocation and before the generator is run.
976  *
977  * When you have a sequence of instructions like:
978  *
979  * DP4 temp.x vertex uniform[0]
980  * DP4 temp.y vertex uniform[0]
981  * DP4 temp.z vertex uniform[0]
982  * DP4 temp.w vertex uniform[0]
983  *
984  * The hardware doesn't know that it can actually run the later instructions
985  * while the previous ones are in flight, producing stalls.  However, we have
986  * manual fields we can set in the instructions that let it do so.
987  */
988 void
989 vec4_visitor::opt_set_dependency_control()
990 {
991    vec4_instruction *last_grf_write[BRW_MAX_GRF];
992    uint8_t grf_channels_written[BRW_MAX_GRF];
993    vec4_instruction *last_mrf_write[BRW_MAX_GRF];
994    uint8_t mrf_channels_written[BRW_MAX_GRF];
995 
996    assert(prog_data->total_grf ||
997           !"Must be called after register allocation");
998 
999    foreach_block (block, cfg) {
1000       memset(last_grf_write, 0, sizeof(last_grf_write));
1001       memset(last_mrf_write, 0, sizeof(last_mrf_write));
1002 
1003       foreach_inst_in_block (vec4_instruction, inst, block) {
1004          /* If we read from a register that we were doing dependency control
1005           * on, don't do dependency control across the read.
1006           */
1007          for (int i = 0; i < 3; i++) {
1008             int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
1009             if (inst->src[i].file == VGRF) {
1010                last_grf_write[reg] = NULL;
1011             } else if (inst->src[i].file == FIXED_GRF) {
1012                memset(last_grf_write, 0, sizeof(last_grf_write));
1013                break;
1014             }
1015             assert(inst->src[i].file != MRF);
1016          }
1017 
1018          if (is_dep_ctrl_unsafe(inst)) {
1019             memset(last_grf_write, 0, sizeof(last_grf_write));
1020             memset(last_mrf_write, 0, sizeof(last_mrf_write));
1021             continue;
1022          }
1023 
1024          /* Now, see if we can do dependency control for this instruction
1025           * against a previous one writing to its destination.
1026           */
1027          int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
1028          if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
1029             if (last_grf_write[reg] &&
1030                 last_grf_write[reg]->dst.offset == inst->dst.offset &&
1031                 !(inst->dst.writemask & grf_channels_written[reg])) {
1032                last_grf_write[reg]->no_dd_clear = true;
1033                inst->no_dd_check = true;
1034             } else {
1035                grf_channels_written[reg] = 0;
1036             }
1037 
1038             last_grf_write[reg] = inst;
1039             grf_channels_written[reg] |= inst->dst.writemask;
1040          } else if (inst->dst.file == MRF) {
1041             if (last_mrf_write[reg] &&
1042                 last_mrf_write[reg]->dst.offset == inst->dst.offset &&
1043                 !(inst->dst.writemask & mrf_channels_written[reg])) {
1044                last_mrf_write[reg]->no_dd_clear = true;
1045                inst->no_dd_check = true;
1046             } else {
1047                mrf_channels_written[reg] = 0;
1048             }
1049 
1050             last_mrf_write[reg] = inst;
1051             mrf_channels_written[reg] |= inst->dst.writemask;
1052          }
1053       }
1054    }
1055 }
1056 
1057 bool
1058 vec4_instruction::can_reswizzle(const struct gen_device_info *devinfo,
1059                                 int dst_writemask,
1060                                 int swizzle,
1061                                 int swizzle_mask)
1062 {
1063    /* Gen6 MATH instructions can not execute in align16 mode, so swizzles
1064     * are not allowed.
1065     */
1066    if (devinfo->gen == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
1067       return false;
1068 
1069    if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
1070       return false;
1071 
1072    /* If this instruction sets anything not referenced by swizzle, then we'd
1073     * totally break it when we reswizzle.
1074     */
1075    if (dst.writemask & ~swizzle_mask)
1076       return false;
1077 
1078    if (mlen > 0)
1079       return false;
1080 
1081    for (int i = 0; i < 3; i++) {
1082       if (src[i].is_accumulator())
1083          return false;
1084    }
1085 
1086    return true;
1087 }
1088 
1089 /**
1090  * For any channels in the swizzle's source that were populated by this
1091  * instruction, rewrite the instruction to put the appropriate result directly
1092  * in those channels.
1093  *
1094  * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy_x c.yy_x
1095  */
1096 void
1097 vec4_instruction::reswizzle(int dst_writemask, int swizzle)
1098 {
1099    /* Destination write mask doesn't correspond to source swizzle for the dot
1100     * product and pack_bytes instructions.
1101     */
1102    if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
1103        opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
1104        opcode != VEC4_OPCODE_PACK_BYTES) {
1105       for (int i = 0; i < 3; i++) {
1106          if (src[i].file == BAD_FILE || src[i].file == IMM)
1107             continue;
1108 
1109          src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
1110       }
1111    }
1112 
1113    /* Apply the specified swizzle and writemask to the original mask of
1114     * written components.
1115     */
1116    dst.writemask = dst_writemask &
1117                    brw_apply_swizzle_to_mask(swizzle, dst.writemask);
1118 }
1119 
1120 /*
1121  * Tries to reduce extra MOV instructions by taking temporary GRFs that get
1122  * just written and then MOVed into another reg and making the original write
1123  * of the GRF write directly to the final destination instead.
1124  */
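/*
 * For example:
 *
 *    add vgrf3, vgrf1, vgrf2
 *    mov m4, vgrf3
 *
 * becomes a single "add m4, vgrf1, vgrf2" when vgrf3 is not read afterwards.
 */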
1125 bool
1126 vec4_visitor::opt_register_coalesce()
1127 {
1128    bool progress = false;
1129    int next_ip = 0;
1130 
1131    calculate_live_intervals();
1132 
1133    foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
1134       int ip = next_ip;
1135       next_ip++;
1136 
1137       if (inst->opcode != BRW_OPCODE_MOV ||
1138           (inst->dst.file != VGRF && inst->dst.file != MRF) ||
1139 	  inst->predicate ||
1140 	  inst->src[0].file != VGRF ||
1141 	  inst->dst.type != inst->src[0].type ||
1142 	  inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
1143 	 continue;
1144 
1145       /* Remove no-op MOVs */
1146       if (inst->dst.file == inst->src[0].file &&
1147           inst->dst.nr == inst->src[0].nr &&
1148           inst->dst.offset == inst->src[0].offset) {
1149          bool is_nop_mov = true;
1150 
1151          for (unsigned c = 0; c < 4; c++) {
1152             if ((inst->dst.writemask & (1 << c)) == 0)
1153                continue;
1154 
1155             if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
1156                is_nop_mov = false;
1157                break;
1158             }
1159          }
1160 
1161          if (is_nop_mov) {
1162             inst->remove(block);
1163             progress = true;
1164             continue;
1165          }
1166       }
1167 
1168       bool to_mrf = (inst->dst.file == MRF);
1169 
1170       /* Can't coalesce this GRF if someone else was going to
1171        * read it later.
1172        */
1173       if (var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
1174 	 continue;
1175 
1176       /* We need to check interference with the final destination between this
1177        * instruction and the earliest instruction involved in writing the GRF
1178        * we're eliminating.  To do that, keep track of which of our source
1179        * channels we've seen initialized.
1180        */
1181       const unsigned chans_needed =
1182          brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
1183                                        inst->dst.writemask);
1184       unsigned chans_remaining = chans_needed;
1185 
1186       /* Now walk up the instruction stream trying to see if we can rewrite
1187        * everything writing to the temporary to write into the destination
1188        * instead.
1189        */
1190       vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
1191       foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
1192                                                   inst) {
1193          _scan_inst = scan_inst;
1194 
1195          if (regions_overlap(inst->src[0], inst->size_read(0),
1196                              scan_inst->dst, scan_inst->size_written)) {
1197             /* Found something writing to the reg we want to coalesce away. */
1198             if (to_mrf) {
1199                /* SEND instructions can't have MRF as a destination. */
1200                if (scan_inst->mlen)
1201                   break;
1202 
1203                if (devinfo->gen == 6) {
1204                   /* gen6 math instructions must have the destination be
1205                    * VGRF, so no compute-to-MRF for them.
1206                    */
1207                   if (scan_inst->is_math()) {
1208                      break;
1209                   }
1210                }
1211             }
1212 
1213             /* This doesn't handle saturation on the instruction we
1214              * want to coalesce away if the register types do not match.
1215              * But if scan_inst is a non type-converting 'mov', we can fix
1216              * the types later.
1217              */
1218             if (inst->saturate &&
1219                 inst->dst.type != scan_inst->dst.type &&
1220                 !(scan_inst->opcode == BRW_OPCODE_MOV &&
1221                   scan_inst->dst.type == scan_inst->src[0].type))
1222                break;
1223 
1224             /* Only allow coalescing between registers of the same type size.
1225              * Otherwise we would need to make the pass aware of the fact that
1226              * channel sizes are different for single and double precision.
1227              */
1228             if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
1229                break;
1230 
1231             /* Check that scan_inst writes the same amount of data as the
1232              * instruction, otherwise coalescing would lead to writing a
1233              * different (larger or smaller) region of the destination
1234              */
1235             if (scan_inst->size_written != inst->size_written)
1236                break;
1237 
1238             /* If we can't handle the swizzle, bail. */
1239             if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
1240                                           inst->src[0].swizzle,
1241                                           chans_needed)) {
1242                break;
1243             }
1244 
1245             /* This only handles coalescing writes of 8 channels (1 register
1246              * for single-precision and 2 registers for double-precision)
1247              * starting at the source offset of the copy instruction.
1248              */
1249             if (DIV_ROUND_UP(scan_inst->size_written,
1250                              type_sz(scan_inst->dst.type)) > 8 ||
1251                 scan_inst->dst.offset != inst->src[0].offset)
1252                break;
1253 
1254 	    /* Mark which channels we found unconditional writes for. */
1255 	    if (!scan_inst->predicate)
1256                chans_remaining &= ~scan_inst->dst.writemask;
1257 
1258 	    if (chans_remaining == 0)
1259 	       break;
1260 	 }
1261 
1262          /* You can't read from an MRF, so if someone else reads our MRF's
1263           * source GRF that we wanted to rewrite, that stops us.  If it's a
1264           * GRF we're trying to coalesce to, we don't actually handle
1265           * rewriting sources so bail in that case as well.
1266           */
1267 	 bool interfered = false;
1268 	 for (int i = 0; i < 3; i++) {
1269             if (regions_overlap(inst->src[0], inst->size_read(0),
1270                                 scan_inst->src[i], scan_inst->size_read(i)))
1271 	       interfered = true;
1272 	 }
1273 	 if (interfered)
1274 	    break;
1275 
1276          /* If somebody else writes the same channels of our destination here,
1277           * we can't coalesce before that.
1278           */
1279          if (regions_overlap(inst->dst, inst->size_written,
1280                              scan_inst->dst, scan_inst->size_written) &&
1281              (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
1282             break;
1283          }
1284 
1285          /* Check for reads of the register we're trying to coalesce into.  We
1286           * can't go rewriting instructions above that to put some other value
1287           * in the register instead.
1288           */
1289          if (to_mrf && scan_inst->mlen > 0) {
1290             if (inst->dst.nr >= scan_inst->base_mrf &&
1291                 inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
1292                break;
1293             }
1294          } else {
1295             for (int i = 0; i < 3; i++) {
1296                if (regions_overlap(inst->dst, inst->size_written,
1297                                    scan_inst->src[i], scan_inst->size_read(i)))
1298                   interfered = true;
1299             }
1300             if (interfered)
1301                break;
1302          }
1303       }
1304 
1305       if (chans_remaining == 0) {
1306 	 /* If we've made it here, we have an MOV we want to coalesce out, and
1307 	  * a scan_inst pointing to the earliest instruction involved in
1308 	  * computing the value.  Now go rewrite the instruction stream
1309 	  * between the two.
1310 	  */
1311          vec4_instruction *scan_inst = _scan_inst;
1312 	 while (scan_inst != inst) {
1313 	    if (scan_inst->dst.file == VGRF &&
1314                 scan_inst->dst.nr == inst->src[0].nr &&
1315 		scan_inst->dst.offset == inst->src[0].offset) {
1316                scan_inst->reswizzle(inst->dst.writemask,
1317                                     inst->src[0].swizzle);
1318 	       scan_inst->dst.file = inst->dst.file;
1319                scan_inst->dst.nr = inst->dst.nr;
1320 	       scan_inst->dst.offset = inst->dst.offset;
1321                if (inst->saturate &&
1322                    inst->dst.type != scan_inst->dst.type) {
1323                   /* If we have reached this point, scan_inst is a non
1324                    * type-converting 'mov' and we can modify its register types
1325                    * to match the ones in inst. Otherwise, we could have an
1326                    * incorrect saturation result.
1327                    */
1328                   scan_inst->dst.type = inst->dst.type;
1329                   scan_inst->src[0].type = inst->src[0].type;
1330                }
1331 	       scan_inst->saturate |= inst->saturate;
1332 	    }
1333 	    scan_inst = (vec4_instruction *)scan_inst->next;
1334 	 }
1335 	 inst->remove(block);
1336 	 progress = true;
1337       }
1338    }
1339 
1340    if (progress)
1341       invalidate_live_intervals();
1342 
1343    return progress;
1344 }
1345 
1346 /**
1347  * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
1348  * flow.  We could probably do better here with some form of divergence
1349  * analysis.
1350  */
1351 bool
1352 vec4_visitor::eliminate_find_live_channel()
1353 {
1354    bool progress = false;
1355    unsigned depth = 0;
1356 
1357    if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
1358       /* The optimization below assumes that channel zero is live on thread
1359        * dispatch, which may not be the case if the fixed function dispatches
1360        * threads sparsely.
1361        */
1362       return false;
1363    }
1364 
1365    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
1366       switch (inst->opcode) {
1367       case BRW_OPCODE_IF:
1368       case BRW_OPCODE_DO:
1369          depth++;
1370          break;
1371 
1372       case BRW_OPCODE_ENDIF:
1373       case BRW_OPCODE_WHILE:
1374          depth--;
1375          break;
1376 
1377       case SHADER_OPCODE_FIND_LIVE_CHANNEL:
1378          if (depth == 0) {
1379             inst->opcode = BRW_OPCODE_MOV;
1380             inst->src[0] = brw_imm_d(0);
1381             inst->force_writemask_all = true;
1382             progress = true;
1383          }
1384          break;
1385 
1386       default:
1387          break;
1388       }
1389    }
1390 
1391    return progress;
1392 }
1393 
1394 /**
1395  * Splits virtual GRFs requesting more than one contiguous physical register.
1396  *
1397  * We initially create large virtual GRFs for temporary structures, arrays,
1398  * and matrices, so that the visitor functions can add offsets to work their
1399  * way down to the actual member being accessed.  But when it comes to
1400  * optimization, we'd like to treat each register as individual storage if
1401  * possible.
1402  *
1403  * So far, the only thing that might prevent splitting is a send message from
1404  * a GRF on IVB.
1405  */
1406 void
1407 vec4_visitor::split_virtual_grfs()
1408 {
1409    int num_vars = this->alloc.count;
1410    int new_virtual_grf[num_vars];
1411    bool split_grf[num_vars];
1412 
1413    memset(new_virtual_grf, 0, sizeof(new_virtual_grf));
1414 
1415    /* Try to split anything > 0 sized. */
1416    for (int i = 0; i < num_vars; i++) {
1417       split_grf[i] = this->alloc.sizes[i] != 1;
1418    }
1419 
1420    /* Check that the instructions are compatible with the registers we're trying
1421     * to split.
1422     */
1423    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1424       if (inst->dst.file == VGRF && regs_written(inst) > 1)
1425          split_grf[inst->dst.nr] = false;
1426 
1427       for (int i = 0; i < 3; i++) {
1428          if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
1429             split_grf[inst->src[i].nr] = false;
1430       }
1431    }
1432 
1433    /* Allocate new space for split regs.  Note that the virtual
1434     * numbers will be contiguous.
1435     */
1436    for (int i = 0; i < num_vars; i++) {
1437       if (!split_grf[i])
1438          continue;
1439 
1440       new_virtual_grf[i] = alloc.allocate(1);
1441       for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
1442          unsigned reg = alloc.allocate(1);
1443          assert(reg == new_virtual_grf[i] + j - 1);
1444          (void) reg;
1445       }
1446       this->alloc.sizes[i] = 1;
1447    }
1448 
1449    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1450       if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
1451           inst->dst.offset / REG_SIZE != 0) {
1452          inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
1453                          inst->dst.offset / REG_SIZE - 1);
1454          inst->dst.offset %= REG_SIZE;
1455       }
1456       for (int i = 0; i < 3; i++) {
1457          if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
1458              inst->src[i].offset / REG_SIZE != 0) {
1459             inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
1460                                 inst->src[i].offset / REG_SIZE - 1);
1461             inst->src[i].offset %= REG_SIZE;
1462          }
1463       }
1464    }
1465    invalidate_live_intervals();
1466 }
1467 
1468 void
1469 vec4_visitor::dump_instruction(backend_instruction *be_inst)
1470 {
1471    dump_instruction(be_inst, stderr);
1472 }
1473 
1474 void
1475 vec4_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
1476 {
1477    vec4_instruction *inst = (vec4_instruction *)be_inst;
1478 
1479    if (inst->predicate) {
1480       fprintf(file, "(%cf0.%d%s) ",
1481               inst->predicate_inverse ? '-' : '+',
1482               inst->flag_subreg,
1483               pred_ctrl_align16[inst->predicate]);
1484    }
1485 
1486    fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
1487            inst->exec_size);
1488    if (inst->saturate)
1489       fprintf(file, ".sat");
1490    if (inst->conditional_mod) {
1491       fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
1492       if (!inst->predicate &&
1493           (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
1494                                 inst->opcode != BRW_OPCODE_IF &&
1495                                 inst->opcode != BRW_OPCODE_WHILE))) {
1496          fprintf(file, ".f0.%d", inst->flag_subreg);
1497       }
1498    }
1499    fprintf(file, " ");
1500 
1501    switch (inst->dst.file) {
1502    case VGRF:
1503       fprintf(file, "vgrf%d", inst->dst.nr);
1504       break;
1505    case FIXED_GRF:
1506       fprintf(file, "g%d", inst->dst.nr);
1507       break;
1508    case MRF:
1509       fprintf(file, "m%d", inst->dst.nr);
1510       break;
1511    case ARF:
1512       switch (inst->dst.nr) {
1513       case BRW_ARF_NULL:
1514          fprintf(file, "null");
1515          break;
1516       case BRW_ARF_ADDRESS:
1517          fprintf(file, "a0.%d", inst->dst.subnr);
1518          break;
1519       case BRW_ARF_ACCUMULATOR:
1520          fprintf(file, "acc%d", inst->dst.subnr);
1521          break;
1522       case BRW_ARF_FLAG:
1523          fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
1524          break;
1525       default:
1526          fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
1527          break;
1528       }
1529       break;
1530    case BAD_FILE:
1531       fprintf(file, "(null)");
1532       break;
1533    case IMM:
1534    case ATTR:
1535    case UNIFORM:
1536       unreachable("not reached");
1537    }
1538    if (inst->dst.offset ||
1539        (inst->dst.file == VGRF &&
1540         alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
1541       const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
1542       fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
1543               inst->dst.offset % reg_size);
1544    }
1545    if (inst->dst.writemask != WRITEMASK_XYZW) {
1546       fprintf(file, ".");
1547       if (inst->dst.writemask & 1)
1548          fprintf(file, "x");
1549       if (inst->dst.writemask & 2)
1550          fprintf(file, "y");
1551       if (inst->dst.writemask & 4)
1552          fprintf(file, "z");
1553       if (inst->dst.writemask & 8)
1554          fprintf(file, "w");
1555    }
1556    fprintf(file, ":%s", brw_reg_type_letters(inst->dst.type));
1557 
1558    if (inst->src[0].file != BAD_FILE)
1559       fprintf(file, ", ");
1560 
1561    for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
1562       if (inst->src[i].negate)
1563          fprintf(file, "-");
1564       if (inst->src[i].abs)
1565          fprintf(file, "|");
1566       switch (inst->src[i].file) {
1567       case VGRF:
1568          fprintf(file, "vgrf%d", inst->src[i].nr);
1569          break;
1570       case FIXED_GRF:
1571          fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
1572          break;
1573       case ATTR:
1574          fprintf(file, "attr%d", inst->src[i].nr);
1575          break;
1576       case UNIFORM:
1577          fprintf(file, "u%d", inst->src[i].nr);
1578          break;
1579       case IMM:
1580          switch (inst->src[i].type) {
1581          case BRW_REGISTER_TYPE_F:
1582             fprintf(file, "%fF", inst->src[i].f);
1583             break;
1584          case BRW_REGISTER_TYPE_DF:
1585             fprintf(file, "%fDF", inst->src[i].df);
1586             break;
1587          case BRW_REGISTER_TYPE_D:
1588             fprintf(file, "%dD", inst->src[i].d);
1589             break;
1590          case BRW_REGISTER_TYPE_UD:
1591             fprintf(file, "%uU", inst->src[i].ud);
1592             break;
1593          case BRW_REGISTER_TYPE_VF:
1594             fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
1595                     brw_vf_to_float((inst->src[i].ud >>  0) & 0xff),
1596                     brw_vf_to_float((inst->src[i].ud >>  8) & 0xff),
1597                     brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
1598                     brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
1599             break;
1600          default:
1601             fprintf(file, "???");
1602             break;
1603          }
1604          break;
1605       case ARF:
1606          switch (inst->src[i].nr) {
1607          case BRW_ARF_NULL:
1608             fprintf(file, "null");
1609             break;
1610          case BRW_ARF_ADDRESS:
1611             fprintf(file, "a0.%d", inst->src[i].subnr);
1612             break;
1613          case BRW_ARF_ACCUMULATOR:
1614             fprintf(file, "acc%d", inst->src[i].subnr);
1615             break;
1616          case BRW_ARF_FLAG:
1617             fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
1618             break;
1619          default:
1620             fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
1621             break;
1622          }
1623          break;
1624       case BAD_FILE:
1625          fprintf(file, "(null)");
1626          break;
1627       case MRF:
1628          unreachable("not reached");
1629       }
1630 
1631       if (inst->src[i].offset ||
1632           (inst->src[i].file == VGRF &&
1633            alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
1634          const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
1635          fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
1636                  inst->src[i].offset % reg_size);
1637       }
1638 
1639       if (inst->src[i].file != IMM) {
1640          static const char *chans[4] = {"x", "y", "z", "w"};
1641          fprintf(file, ".");
1642          for (int c = 0; c < 4; c++) {
1643             fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
1644          }
1645       }
1646 
1647       if (inst->src[i].abs)
1648          fprintf(file, "|");
1649 
1650       if (inst->src[i].file != IMM) {
1651          fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
1652       }
1653 
1654       if (i < 2 && inst->src[i + 1].file != BAD_FILE)
1655          fprintf(file, ", ");
1656    }
1657 
1658    if (inst->force_writemask_all)
1659       fprintf(file, " NoMask");
1660 
1661    if (inst->exec_size != 8)
1662       fprintf(file, " group%d", inst->group);
1663 
1664    fprintf(file, "\n");
1665 }
1666 
1667 
1668 static inline struct brw_reg
1669 attribute_to_hw_reg(int attr, brw_reg_type type, bool interleaved)
1670 {
1671    struct brw_reg reg;
1672 
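        /* A vec4 operand spans half a GRF: REG_SIZE / 2 bytes yields a width
         * of 4 channels for 32-bit types and 2 channels for 64-bit types.
         */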
1673    unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
1674    if (interleaved) {
1675       reg = stride(brw_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
1676    } else {
1677       reg = brw_vecn_grf(width, attr, 0);
1678    }
1679 
1680    reg.type = type;
1681    return reg;
1682 }
1683 
1684 
1685 /**
1686  * Replace each register of type ATTR in this->instructions with a reference
1687  * to a fixed HW register.
1688  *
1689  * If interleaved is true, then each attribute takes up half a register, with
1690  * register N containing attribute 2*N in its first half and attribute 2*N+1
1691  * in its second half (this corresponds to the payload setup used by geometry
1692  * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
1693  * false, then each attribute takes up a whole register, with register N
1694  * containing attribute N (this corresponds to the payload setup used by
1695  * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
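      *
      * For example, with interleaved true, attribute 5 lives in the second
      * half of register 2, whereas with interleaved false it occupies all of
      * register 5.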
1696  */
1697 void
1698 vec4_visitor::lower_attributes_to_hw_regs(const int *attribute_map,
1699                                           bool interleaved)
1700 {
1701    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1702       for (int i = 0; i < 3; i++) {
1703          if (inst->src[i].file != ATTR)
1704             continue;
1705 
1706          int grf = attribute_map[inst->src[i].nr +
1707                                  inst->src[i].offset / REG_SIZE];
1708          assert(inst->src[i].offset % REG_SIZE == 0);
1709 
1710          /* All attributes used in the shader need to have been assigned a
1711           * hardware register by the caller
1712           */
1713          assert(grf != 0);
1714 
1715          struct brw_reg reg =
1716             attribute_to_hw_reg(grf, inst->src[i].type, interleaved);
1717          reg.swizzle = inst->src[i].swizzle;
1718          if (inst->src[i].abs)
1719             reg = brw_abs(reg);
1720          if (inst->src[i].negate)
1721             reg = negate(reg);
1722 
1723          inst->src[i] = reg;
1724       }
1725    }
1726 }
1727 
1728 int
1729 vec4_vs_visitor::setup_attributes(int payload_reg)
1730 {
1731    int nr_attributes;
1732    int attribute_map[VERT_ATTRIB_MAX + 2];
1733    memset(attribute_map, 0, sizeof(attribute_map));
1734 
1735    nr_attributes = 0;
1736    GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
1737    while (vs_inputs) {
1738       GLuint first = ffsll(vs_inputs) - 1;
1739       int needed_slots =
1740          (vs_prog_data->double_inputs_read & BITFIELD64_BIT(first)) ? 2 : 1;
1741       for (int c = 0; c < needed_slots; c++) {
1742          attribute_map[first + c] = payload_reg + nr_attributes;
1743          nr_attributes++;
1744          vs_inputs &= ~BITFIELD64_BIT(first + c);
1745       }
1746    }
1747 
1748    /* VertexID is stored by the VF as the last vertex element, but we
1749     * don't represent it with a flag in inputs_read, so we call it
1750     * VERT_ATTRIB_MAX.
1751     */
1752    if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
1753        vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
1754       attribute_map[VERT_ATTRIB_MAX] = payload_reg + nr_attributes;
1755       nr_attributes++;
1756    }
1757 
1758    if (vs_prog_data->uses_drawid) {
1759       attribute_map[VERT_ATTRIB_MAX + 1] = payload_reg + nr_attributes;
1760       nr_attributes++;
1761    }
1762 
1763    lower_attributes_to_hw_regs(attribute_map, false /* interleaved */);
1764 
1765    return payload_reg + vs_prog_data->nr_attribute_slots;
1766 }
1767 
1768 int
1769 vec4_visitor::setup_uniforms(int reg)
1770 {
1771    prog_data->base.dispatch_grf_start_reg = reg;
1772 
1773    /* The pre-gen6 VS requires that some push constants get loaded no
1774     * matter what, or the GPU would hang.
1775     */
1776    if (devinfo->gen < 6 && this->uniforms == 0) {
1777       stage_prog_data->param =
1778          reralloc(NULL, stage_prog_data->param, const gl_constant_value *, 4);
1779       for (unsigned int i = 0; i < 4; i++) {
1780 	 unsigned int slot = this->uniforms * 4 + i;
1781 	 static gl_constant_value zero = { 0.0 };
1782 	 stage_prog_data->param[slot] = &zero;
1783       }
1784 
1785       this->uniforms++;
1786       reg++;
1787    } else {
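           /* Each push constant register holds two packed vec4 uniforms. */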
1788       reg += ALIGN(uniforms, 2) / 2;
1789    }
1790 
1791    stage_prog_data->nr_params = this->uniforms * 4;
1792 
1793    prog_data->base.curb_read_length =
1794       reg - prog_data->base.dispatch_grf_start_reg;
1795 
1796    return reg;
1797 }
1798 
1799 void
1800 vec4_vs_visitor::setup_payload(void)
1801 {
1802    int reg = 0;
1803 
1804    /* The payload always contains important data in g0, which contains
1805     * the URB handles that are passed on to the URB write at the end
1806     * of the thread.  So, we always start push constants at g1.
1807     */
1808    reg++;
1809 
1810    reg = setup_uniforms(reg);
1811 
1812    reg = setup_attributes(reg);
1813 
1814    this->first_non_payload_grf = reg;
1815 }
1816 
1817 bool
1818 vec4_visitor::lower_minmax()
1819 {
1820    assert(devinfo->gen < 6);
1821 
1822    bool progress = false;
1823 
1824    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
1825       const vec4_builder ibld(this, block, inst);
1826 
1827       if (inst->opcode == BRW_OPCODE_SEL &&
1828           inst->predicate == BRW_PREDICATE_NONE) {
1829          /* FIXME: Using CMP doesn't preserve the NaN propagation semantics of
1830           *        the original SEL.L/GE instruction
1831           */
1832          ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
1833                   inst->conditional_mod);
1834          inst->predicate = BRW_PREDICATE_NORMAL;
1835          inst->conditional_mod = BRW_CONDITIONAL_NONE;
1836 
1837          progress = true;
1838       }
1839    }
1840 
1841    if (progress)
1842       invalidate_live_intervals();
1843 
1844    return progress;
1845 }
1846 
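     /* Read the TM0 timestamp architecture register into a temporary VGRF so
      * callers can compute elapsed shader time.
      */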
1847 src_reg
1848 vec4_visitor::get_timestamp()
1849 {
1850    assert(devinfo->gen >= 7);
1851 
1852    src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
1853                                 BRW_ARF_TIMESTAMP,
1854                                 0,
1855                                 0,
1856                                 0,
1857                                 BRW_REGISTER_TYPE_UD,
1858                                 BRW_VERTICAL_STRIDE_0,
1859                                 BRW_WIDTH_4,
1860                                 BRW_HORIZONTAL_STRIDE_4,
1861                                 BRW_SWIZZLE_XYZW,
1862                                 WRITEMASK_XYZW));
1863 
1864    dst_reg dst = dst_reg(this, glsl_type::uvec4_type);
1865 
1866    vec4_instruction *mov = emit(MOV(dst, ts));
1867    /* We want to read the 3 fields we care about (mostly field 0, but also 2)
1868     * even if it's not enabled in the dispatch.
1869     */
1870    mov->force_writemask_all = true;
1871 
1872    return src_reg(dst);
1873 }
1874 
1875 void
1876 vec4_visitor::emit_shader_time_begin()
1877 {
1878    current_annotation = "shader time start";
1879    shader_start_time = get_timestamp();
1880 }
1881 
1882 void
1883 vec4_visitor::emit_shader_time_end()
1884 {
1885    current_annotation = "shader time end";
1886    src_reg shader_end_time = get_timestamp();
1887 
1888 
1889    /* Check that there weren't any timestamp reset events (assuming these
1890     * were the only two timestamp reads that happened).
1891     */
1892    src_reg reset_end = shader_end_time;
1893    reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
1894    vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
1895    test->conditional_mod = BRW_CONDITIONAL_Z;
1896 
1897    emit(IF(BRW_PREDICATE_NORMAL));
1898 
1899    /* Take the current timestamp and get the delta. */
1900    shader_start_time.negate = true;
1901    dst_reg diff = dst_reg(this, glsl_type::uint_type);
1902    emit(ADD(diff, shader_start_time, shader_end_time));
1903 
1904    /* If there were no instructions between the two timestamp gets, the diff
1905     * is 2 cycles.  Remove that overhead, so I can forget about that when
1906     * trying to determine the time taken for single instructions.
1907     */
1908    emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));
1909 
1910    emit_shader_time_write(0, src_reg(diff));
1911    emit_shader_time_write(1, brw_imm_ud(1u));
1912    emit(BRW_OPCODE_ELSE);
1913    emit_shader_time_write(2, brw_imm_ud(1u));
1914    emit(BRW_OPCODE_ENDIF);
1915 }
1916 
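     /* Accumulate one value into the shader time buffer: the two-register
      * message payload holds the buffer offset followed by the value to add.
      */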
1917 void
1918 vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
1919 {
1920    dst_reg dst =
1921       dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));
1922 
1923    dst_reg offset = dst;
1924    dst_reg time = dst;
1925    time.offset += REG_SIZE;
1926 
1927    offset.type = BRW_REGISTER_TYPE_UD;
1928    int index = shader_time_index * 3 + shader_time_subindex;
1929    emit(MOV(offset, brw_imm_d(index * SHADER_TIME_STRIDE)));
1930 
1931    time.type = BRW_REGISTER_TYPE_UD;
1932    emit(MOV(time, value));
1933 
1934    vec4_instruction *inst =
1935       emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
1936    inst->mlen = 2;
1937 }
1938 
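     /* Translate the logical register files used by the IR (VGRF, UNIFORM,
      * MRF, ...) into fixed brw_reg hardware registers after register
      * allocation has assigned GRF numbers.
      */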
1939 void
1940 vec4_visitor::convert_to_hw_regs()
1941 {
1942    foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1943       for (int i = 0; i < 3; i++) {
1944          struct src_reg &src = inst->src[i];
1945          struct brw_reg reg;
1946          switch (src.file) {
1947          case VGRF: {
1948             const unsigned type_size = type_sz(src.type);
1949             const unsigned width = REG_SIZE / 2 / MAX2(4, type_size);
1950             reg = byte_offset(brw_vecn_grf(width, src.nr, 0), src.offset);
1951             reg.type = src.type;
1952             reg.abs = src.abs;
1953             reg.negate = src.negate;
1954             break;
1955          }
1956 
1957          case UNIFORM: {
1958             const unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(src.type));
1959             reg = stride(byte_offset(brw_vec4_grf(
1960                                         prog_data->base.dispatch_grf_start_reg +
1961                                         src.nr / 2, src.nr % 2 * 4),
1962                                      src.offset),
1963                          0, width, 1);
1964             reg.type = src.type;
1965             reg.abs = src.abs;
1966             reg.negate = src.negate;
1967 
1968             /* This should have been moved to pull constants. */
1969             assert(!src.reladdr);
1970             break;
1971          }
1972 
1973          case FIXED_GRF:
1974             if (type_sz(src.type) == 8) {
1975                reg = src.as_brw_reg();
1976                break;
1977             }
1978             /* fallthrough */
1979          case ARF:
1980          case IMM:
1981             continue;
1982 
1983          case BAD_FILE:
1984             /* Probably unused. */
1985             reg = brw_null_reg();
1986             break;
1987 
1988          case MRF:
1989          case ATTR:
1990             unreachable("not reached");
1991          }
1992 
1993          apply_logical_swizzle(&reg, inst, i);
1994          src = reg;
1995       }
1996 
1997       if (inst->is_3src(devinfo)) {
1998          /* 3-src instructions with scalar sources support arbitrary subnr,
1999           * but don't actually use swizzles.  Convert swizzle into subnr.
2000           * Skip this for double-precision instructions: RepCtrl=1 is not
2001           * allowed for them and needs special handling.
2002           */
2003          for (int i = 0; i < 3; i++) {
2004             if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
2005                 type_sz(inst->src[i].type) < 8) {
2006                assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
2007                inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
2008             }
2009          }
2010       }
2011 
2012       dst_reg &dst = inst->dst;
2013       struct brw_reg reg;
2014 
2015       switch (inst->dst.file) {
2016       case VGRF:
2017          reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
2018          reg.type = dst.type;
2019          reg.writemask = dst.writemask;
2020          break;
2021 
2022       case MRF:
2023          reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
2024          assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
2025          reg.type = dst.type;
2026          reg.writemask = dst.writemask;
2027          break;
2028 
2029       case ARF:
2030       case FIXED_GRF:
2031          reg = dst.as_brw_reg();
2032          break;
2033 
2034       case BAD_FILE:
2035          reg = brw_null_reg();
2036          break;
2037 
2038       case IMM:
2039       case ATTR:
2040       case UNIFORM:
2041          unreachable("not reached");
2042       }
2043 
2044       dst = reg;
2045    }
2046 }
2047 
2048 static bool
2049 stage_uses_interleaved_attributes(unsigned stage,
2050                                   enum shader_dispatch_mode dispatch_mode)
2051 {
2052    switch (stage) {
2053    case MESA_SHADER_TESS_EVAL:
2054       return true;
2055    case MESA_SHADER_GEOMETRY:
2056       return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
2057    default:
2058       return false;
2059    }
2060 }
2061 
2062 /**
2063  * Get the closest native SIMD width supported by the hardware for instruction
2064  * \p inst.  The instruction will be left untouched by
2065  * vec4_visitor::lower_simd_width() if the returned value matches the
2066  * instruction's original execution size.
2067  */
2068 static unsigned
2069 get_lowered_simd_width(const struct gen_device_info *devinfo,
2070                        enum shader_dispatch_mode dispatch_mode,
2071                        unsigned stage, const vec4_instruction *inst)
2072 {
2073    /* Do not split some instructions that require special handling */
2074    switch (inst->opcode) {
2075    case SHADER_OPCODE_GEN4_SCRATCH_READ:
2076    case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
2077       return inst->exec_size;
2078    default:
2079       break;
2080    }
2081 
2082    unsigned lowered_width = MIN2(16, inst->exec_size);
2083 
2084    /* We need to split some cases of double-precision instructions that write
2085     * 2 registers. We only need to care about this in gen7 because that is the
2086     * only hardware that implements fp64 in Align16.
2087     */
2088    if (devinfo->gen == 7 && inst->size_written > REG_SIZE) {
2089       /* Align16 8-wide double-precision SEL does not work well. Verified
2090        * empirically.
2091        */
2092       if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
2093          lowered_width = MIN2(lowered_width, 4);
2094 
2095       /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
2096        * Register Addressing:
2097        *
2098        *    "When destination spans two registers, the source MUST span two
2099        *     registers."
2100        */
2101       for (unsigned i = 0; i < 3; i++) {
2102          if (inst->src[i].file == BAD_FILE)
2103             continue;
2104          if (inst->size_read(i) <= REG_SIZE)
2105             lowered_width = MIN2(lowered_width, 4);
2106 
2107          /* Interleaved attribute setups use a vertical stride of 0, which
2108           * makes them hit the associated instruction decompression bug in gen7.
2109           * Split them to prevent this.
2110           */
2111          if (inst->src[i].file == ATTR &&
2112              stage_uses_interleaved_attributes(stage, dispatch_mode))
2113             lowered_width = MIN2(lowered_width, 4);
2114       }
2115    }
2116 
2117    return lowered_width;
2118 }
2119 
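     /* Check whether the region written by the destination of @inst overlaps
      * the region read by any of its sources (same file and register number
      * with intersecting byte ranges).
      */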
2120 static bool
2121 dst_src_regions_overlap(vec4_instruction *inst)
2122 {
2123    if (inst->size_written == 0)
2124       return false;
2125 
2126    unsigned dst_start = inst->dst.offset;
2127    unsigned dst_end = dst_start + inst->size_written - 1;
2128    for (int i = 0; i < 3; i++) {
2129       if (inst->src[i].file == BAD_FILE)
2130          continue;
2131 
2132       if (inst->dst.file != inst->src[i].file ||
2133           inst->dst.nr != inst->src[i].nr)
2134          continue;
2135 
2136       unsigned src_start = inst->src[i].offset;
2137       unsigned src_end = src_start + inst->size_read(i) - 1;
2138 
2139       if ((dst_start >= src_start && dst_start <= src_end) ||
2140           (dst_end >= src_start && dst_end <= src_end) ||
2141           (dst_start <= src_start && dst_end >= src_end)) {
2142          return true;
2143       }
2144    }
2145 
2146    return false;
2147 }
2148 
2149 bool
2150 vec4_visitor::lower_simd_width()
2151 {
2152    bool progress = false;
2153 
2154    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2155       const unsigned lowered_width =
2156          get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
2157       assert(lowered_width <= inst->exec_size);
2158       if (lowered_width == inst->exec_size)
2159          continue;
2160 
2161       /* We need to deal with source / destination overlaps when splitting.
2162        * The hardware supports reading from and writing to the same register
2163        * in the same instruction, but we need to be careful that each split
2164        * instruction we produce does not corrupt the source of the next.
2165        *
2166        * The easiest way to handle this is to make the split instructions write
2167        * to temporaries if there is an src/dst overlap and then move from the
2168        * temporaries to the original destination. We also need to consider
2169        * instructions that do partial writes via align1 opcodes, in which case
2170        * we need to make sure that we initialize the temporary with the
2171        * value of the instruction's dst.
2172        */
2173       bool needs_temp = dst_src_regions_overlap(inst);
2174       for (unsigned n = 0; n < inst->exec_size / lowered_width; n++) {
2175          unsigned channel_offset = lowered_width * n;
2176 
2177          unsigned size_written = lowered_width * type_sz(inst->dst.type);
2178 
2179          /* Create the split instruction from the original so that we copy all
2180           * relevant instruction fields, then set the width and calculate the
2181           * new dst/src regions.
2182           */
2183          vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
2184          linst->exec_size = lowered_width;
2185          linst->group = channel_offset;
2186          linst->size_written = size_written;
2187 
2188          /* Compute split dst region */
2189          dst_reg dst;
2190          if (needs_temp) {
2191             unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
2192             dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
2193                          inst->dst.type);
2194             if (inst->is_align1_partial_write()) {
2195                vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
2196                copy->exec_size = lowered_width;
2197                copy->group = channel_offset;
2198                copy->size_written = size_written;
2199                inst->insert_before(block, copy);
2200             }
2201          } else {
2202             dst = horiz_offset(inst->dst, channel_offset);
2203          }
2204          linst->dst = dst;
2205 
2206          /* Compute split source regions */
2207          for (int i = 0; i < 3; i++) {
2208             if (linst->src[i].file == BAD_FILE)
2209                continue;
2210 
2211             if (!is_uniform(linst->src[i]))
2212                linst->src[i] = horiz_offset(linst->src[i], channel_offset);
2213          }
2214 
2215          inst->insert_before(block, linst);
2216 
2217          /* If we used a temporary to store the result of the split
2218           * instruction, copy the result to the original destination
2219           */
2220          if (needs_temp) {
2221             vec4_instruction *mov =
2222                MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
2223             mov->exec_size = lowered_width;
2224             mov->group = channel_offset;
2225             mov->size_written = size_written;
2226             mov->predicate = inst->predicate;
2227             inst->insert_before(block, mov);
2228          }
2229       }
2230 
2231       inst->remove(block);
2232       progress = true;
2233    }
2234 
2235    if (progress)
2236       invalidate_live_intervals();
2237 
2238    return progress;
2239 }
2240 
2241 static bool
2242 is_align1_df(vec4_instruction *inst)
2243 {
2244    switch (inst->opcode) {
2245    case VEC4_OPCODE_FROM_DOUBLE:
2246    case VEC4_OPCODE_TO_DOUBLE:
2247    case VEC4_OPCODE_PICK_LOW_32BIT:
2248    case VEC4_OPCODE_PICK_HIGH_32BIT:
2249    case VEC4_OPCODE_SET_LOW_32BIT:
2250    case VEC4_OPCODE_SET_HIGH_32BIT:
2251       return true;
2252    default:
2253       return false;
2254    }
2255 }
2256 
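     /* When an instruction is scalarized to a single channel, a normal
      * predicate must be replaced by the Align16 replicate predicate that
      * reads the flag bit for that channel.
      */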
2257 static brw_predicate
2258 scalarize_predicate(brw_predicate predicate, unsigned writemask)
2259 {
2260    if (predicate != BRW_PREDICATE_NORMAL)
2261       return predicate;
2262 
2263    switch (writemask) {
2264    case WRITEMASK_X:
2265       return BRW_PREDICATE_ALIGN16_REPLICATE_X;
2266    case WRITEMASK_Y:
2267       return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
2268    case WRITEMASK_Z:
2269       return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
2270    case WRITEMASK_W:
2271       return BRW_PREDICATE_ALIGN16_REPLICATE_W;
2272    default:
2273       unreachable("invalid writemask");
2274    }
2275 }
2276 
2277 /* Gen7 has a hardware decompression bug that we can exploit to represent
2278  * a handful of additional swizzles natively.
2279  */
2280 static bool
2281 is_gen7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
2282 {
2283    switch (inst->src[arg].swizzle) {
2284    case BRW_SWIZZLE_XXXX:
2285    case BRW_SWIZZLE_YYYY:
2286    case BRW_SWIZZLE_ZZZZ:
2287    case BRW_SWIZZLE_WWWW:
2288    case BRW_SWIZZLE_XYXY:
2289    case BRW_SWIZZLE_YXYX:
2290    case BRW_SWIZZLE_ZWZW:
2291    case BRW_SWIZZLE_WZWZ:
2292       return true;
2293    default:
2294       return false;
2295    }
2296 }
2297 
2298 /* 64-bit sources use regions with a width of 2. These 2 elements in each row
2299  * can be addressed using 32-bit swizzles (which is what the hardware supports)
2300  * but it also means that the swizzle we apply on the first two components of a
2301  * dvec4 is coupled with the swizzle we use for the last 2. In other words,
2302  * only some specific swizzle combinations can be natively supported.
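      *
      * For example, XXZZ is representable (both 2-wide rows use the same
      * 32-bit swizzle), while XXYY is not, because the second row would have
      * to reach back into the first dvec2.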
2303  *
2304  * FIXME: we can go a step further and implement even more swizzle
2305  *        variations using only partial scalarization.
2306  *
2307  * For more details see:
2308  * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
2309  */
2310 bool
2311 vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
2312 {
2313    const src_reg &src = inst->src[arg];
2314    assert(type_sz(src.type) == 8);
2315 
2316    /* Uniform regions have a vstride=0. Because we use 2-wide rows for
2317     * 64-bit regions, we cannot access components Z/W, so
2318     * return false for any such case. Interleaved attributes will also be
2319     * mapped to GRF registers with a vstride of 0, so apply the same
2320     * treatment.
2321     */
2322    if ((is_uniform(src) ||
2323         (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
2324          src.file == ATTR)) &&
2325        (brw_mask_for_swizzle(src.swizzle) & 12))
2326       return false;
2327 
2328    switch (src.swizzle) {
2329    case BRW_SWIZZLE_XYZW:
2330    case BRW_SWIZZLE_XXZZ:
2331    case BRW_SWIZZLE_YYWW:
2332    case BRW_SWIZZLE_YXWZ:
2333       return true;
2334    default:
2335       return devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg);
2336    }
2337 }
2338 
2339 bool
2340 vec4_visitor::scalarize_df()
2341 {
2342    bool progress = false;
2343 
2344    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2345       /* Skip DF instructions that operate in Align1 mode */
2346       if (is_align1_df(inst))
2347          continue;
2348 
2349       /* Check if this is a double-precision instruction */
2350       bool is_double = type_sz(inst->dst.type) == 8;
2351       for (int arg = 0; !is_double && arg < 3; arg++) {
2352          is_double = inst->src[arg].file != BAD_FILE &&
2353                      type_sz(inst->src[arg].type) == 8;
2354       }
2355 
2356       if (!is_double)
2357          continue;
2358 
2359       /* Skip the lowering for specific regioning scenarios that we can
2360        * support natively.
2361        */
2362       bool skip_lowering = true;
2363 
2364       /* XY and ZW writemasks operate in 32-bit, which means that they don't
2365        * have a native 64-bit representation and they should always be split.
2366        */
2367       if (inst->dst.writemask == WRITEMASK_XY ||
2368           inst->dst.writemask == WRITEMASK_ZW) {
2369          skip_lowering = false;
2370       } else {
2371          for (unsigned i = 0; i < 3; i++) {
2372             if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
2373                continue;
2374             skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
2375          }
2376       }
2377 
2378       if (skip_lowering)
2379          continue;
2380 
2381       /* Generate scalar instructions for each enabled channel */
2382       for (unsigned chan = 0; chan < 4; chan++) {
2383          unsigned chan_mask = 1 << chan;
2384          if (!(inst->dst.writemask & chan_mask))
2385             continue;
2386 
2387          vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);
2388 
2389          for (unsigned i = 0; i < 3; i++) {
2390             unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
2391             scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
2392          }
2393 
2394          scalar_inst->dst.writemask = chan_mask;
2395 
2396          if (inst->predicate != BRW_PREDICATE_NONE) {
2397             scalar_inst->predicate =
2398                scalarize_predicate(inst->predicate, chan_mask);
2399          }
2400 
2401          inst->insert_before(block, scalar_inst);
2402       }
2403 
2404       inst->remove(block);
2405       progress = true;
2406    }
2407 
2408    if (progress)
2409       invalidate_live_intervals();
2410 
2411    return progress;
2412 }
2413 
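     /* Split double-precision MAD into a MUL that writes a fresh dvec4
      * temporary followed by an ADD of that temporary and the remaining
      * source.
      */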
2414 bool
2415 vec4_visitor::lower_64bit_mad_to_mul_add()
2416 {
2417    bool progress = false;
2418 
2419    foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2420       if (inst->opcode != BRW_OPCODE_MAD)
2421          continue;
2422 
2423       if (type_sz(inst->dst.type) != 8)
2424          continue;
2425 
2426       dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
2427 
2428       /* Use the copy constructor so we copy all relevant instruction fields
2429        * from the original mad into the add and mul instructions
2430        */
2431       vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
2432       mul->opcode = BRW_OPCODE_MUL;
2433       mul->dst = mul_dst;
2434       mul->src[0] = inst->src[1];
2435       mul->src[1] = inst->src[2];
2436       mul->src[2].file = BAD_FILE;
2437 
2438       vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
2439       add->opcode = BRW_OPCODE_ADD;
2440       add->src[0] = src_reg(mul_dst);
2441       add->src[1] = inst->src[0];
2442       add->src[2].file = BAD_FILE;
2443 
2444       inst->insert_before(block, mul);
2445       inst->insert_before(block, add);
2446       inst->remove(block);
2447 
2448       progress = true;
2449    }
2450 
2451    if (progress)
2452       invalidate_live_intervals();
2453 
2454    return progress;
2455 }
2456 
2457 /* The align16 hardware can only do 32-bit swizzle channels, so we need to
2458  * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
2459  * to 32-bit swizzle channels in hardware registers.
2460  *
2461  * @inst and @arg identify the original vec4 IR source operand we need to
2462  * translate the swizzle for and @hw_reg is the hardware register where we
2463  * will write the hardware swizzle to use.
2464  *
2465  * This pass assumes that Align16/DF instructions have been fully scalarized
2466  * previously so there is just one 64-bit swizzle channel to deal with for any
2467  * given Vec4 IR source.
2468  */
2469 void
2470 vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
2471                                     vec4_instruction *inst, int arg)
2472 {
2473    src_reg reg = inst->src[arg];
2474 
2475    if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
2476       return;
2477 
2478    /* If this is not a 64-bit operand or this is a scalar instruction, we
2479     * don't need to do anything about the swizzles.
2480     */
2481    if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
2482       hw_reg->swizzle = reg.swizzle;
2483       return;
2484    }
2485 
2486    /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
2487    assert(brw_is_single_value_swizzle(reg.swizzle) ||
2488           is_supported_64bit_region(inst, arg));
2489 
2490    if (is_supported_64bit_region(inst, arg) &&
2491        !is_gen7_supported_64bit_swizzle(inst, arg)) {
2492       /* Supported 64-bit swizzles are those such that their first two
2493        * components, when expanded to 32-bit swizzles, match the semantics
2494        * of the original 64-bit swizzle with 2-wide row regioning.
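            *
            * For example, the logical 64-bit XXZZ swizzle expands to the
            * 32-bit XYXY swizzle: each logical channel c maps to 32-bit
            * channels 2c and 2c + 1.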
2495        */
2496       unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
2497       unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
2498       hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
2499                                      swizzle1 * 2, swizzle1 * 2 + 1);
2500    } else {
2501       /* If we got here then we have one of the following:
2502        *
2503        * 1. An unsupported swizzle, which should be single-value thanks to the
2504        *    scalarization pass.
2505        *
2506        * 2. A gen7 supported swizzle. These can be single-value or double-value
2507        *    swizzles. If the latter, they are never cross-dvec2 channels. For
2508        *    these we always need to activate the gen7 vstride=0 exploit.
2509        */
2510       unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
2511       unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
2512       assert((swizzle0 < 2) == (swizzle1 < 2));
2513 
2514       /* To gain access to Z/W components we need to select the second half
2515        * of the register and then use a X/Y swizzle to select Z/W respectively.
2516        */
2517       if (swizzle0 >= 2) {
2518          *hw_reg = suboffset(*hw_reg, 2);
2519          swizzle0 -= 2;
2520          swizzle1 -= 2;
2521       }
2522 
2523       /* All gen7-specific supported swizzles require the vstride=0 exploit */
2524       if (devinfo->gen == 7 && is_gen7_supported_64bit_swizzle(inst, arg))
2525          hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
2526 
2527       /* Any 64-bit source with an offset at 16B is intended to address the
2528        * second half of a register and needs a vertical stride of 0 so we:
2529        *
2530        * 1. Don't violate register region restrictions.
2531        * 2. Activate the gen7 instruction decompression bug exploit when
2532        *    execsize > 4
2533        */
2534       if (hw_reg->subnr % REG_SIZE == 16) {
2535          assert(devinfo->gen == 7);
2536          hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
2537       }
2538 
2539       hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
2540                                      swizzle1 * 2, swizzle1 * 2 + 1);
2541    }
2542 }
2543 
2544 bool
2545 vec4_visitor::run()
2546 {
2547    if (shader_time_index >= 0)
2548       emit_shader_time_begin();
2549 
2550    emit_prolog();
2551 
2552    emit_nir_code();
2553    if (failed)
2554       return false;
2555    base_ir = NULL;
2556 
2557    emit_thread_end();
2558 
2559    calculate_cfg();
2560 
2561    /* Before any optimization, push array accesses out to scratch
2562     * space where we need them to be.  This pass may allocate new
2563     * virtual GRFs, so we want to do it early.  It also makes sure
2564     * that we have reladdr computations available for CSE, since we'll
2565     * often do repeated subexpressions for those.
2566     */
2567    move_grf_array_access_to_scratch();
2568    move_uniform_array_access_to_pull_constants();
2569 
2570    pack_uniform_registers();
2571    move_push_constants_to_pull_constants();
2572    split_virtual_grfs();
2573 
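     /* Run an optimization pass and, when the DEBUG_OPTIMIZER flag is set in
      * INTEL_DEBUG, dump the instruction list to a per-pass file whenever the
      * pass reports progress.
      */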
2574 #define OPT(pass, args...) ({                                          \
2575       pass_num++;                                                      \
2576       bool this_progress = pass(args);                                 \
2577                                                                        \
2578       if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) {  \
2579          char filename[64];                                            \
2580          snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
2581                   stage_abbrev, nir->info->name, iteration, pass_num); \
2582                                                                        \
2583          backend_shader::dump_instructions(filename);                  \
2584       }                                                                \
2585                                                                        \
2586       progress = progress || this_progress;                            \
2587       this_progress;                                                   \
2588    })
2589 
2590 
2591    if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
2592       char filename[64];
2593       snprintf(filename, 64, "%s-%s-00-00-start",
2594                stage_abbrev, nir->info->name);
2595 
2596       backend_shader::dump_instructions(filename);
2597    }
2598 
2599    bool progress;
2600    int iteration = 0;
2601    int pass_num = 0;
2602    do {
2603       progress = false;
2604       pass_num = 0;
2605       iteration++;
2606 
2607       OPT(opt_predicated_break, this);
2608       OPT(opt_reduce_swizzle);
2609       OPT(dead_code_eliminate);
2610       OPT(dead_control_flow_eliminate, this);
2611       OPT(opt_copy_propagation);
2612       OPT(opt_cmod_propagation);
2613       OPT(opt_cse);
2614       OPT(opt_algebraic);
2615       OPT(opt_register_coalesce);
2616       OPT(eliminate_find_live_channel);
2617    } while (progress);
2618 
2619    pass_num = 0;
2620 
2621    if (OPT(opt_vector_float)) {
2622       OPT(opt_cse);
2623       OPT(opt_copy_propagation, false);
2624       OPT(opt_copy_propagation, true);
2625       OPT(dead_code_eliminate);
2626    }
2627 
2628    if (devinfo->gen <= 5 && OPT(lower_minmax)) {
2629       OPT(opt_cmod_propagation);
2630       OPT(opt_cse);
2631       OPT(opt_copy_propagation);
2632       OPT(dead_code_eliminate);
2633    }
2634 
2635    if (OPT(lower_simd_width)) {
2636       OPT(opt_copy_propagation);
2637       OPT(dead_code_eliminate);
2638    }
2639 
2640    if (failed)
2641       return false;
2642 
2643    OPT(lower_64bit_mad_to_mul_add);
2644 
2645    /* Run this before payload setup because tessellation shaders
2646     * rely on it to prevent cross dvec2 regioning on DF attributes
2647     * that are set up so that XY are in the second half of a register and
2648     * ZW are in the first half of the next.
2649     */
2650    OPT(scalarize_df);
2651 
2652    setup_payload();
2653 
2654    if (unlikely(INTEL_DEBUG & DEBUG_SPILL_VEC4)) {
2655       /* Debug of register spilling: Go spill everything. */
2656       const int grf_count = alloc.count;
2657       float spill_costs[alloc.count];
2658       bool no_spill[alloc.count];
2659       evaluate_spill_costs(spill_costs, no_spill);
2660       for (int i = 0; i < grf_count; i++) {
2661          if (no_spill[i])
2662             continue;
2663          spill_reg(i);
2664       }
2665 
2666       /* We want to run this after spilling because 64-bit (un)spills need to
2667        * emit code to shuffle 64-bit data for the 32-bit scratch read/write
2668        * messages that can produce unsupported 64-bit swizzle regions.
2669        */
2670       OPT(scalarize_df);
2671    }
2672 
2673    bool allocated_without_spills = reg_allocate();
2674 
2675    if (!allocated_without_spills) {
2676       compiler->shader_perf_log(log_data,
2677                                 "%s shader triggered register spilling.  "
2678                                 "Try reducing the number of live vec4 values "
2679                                 "to improve performance.\n",
2680                                 stage_name);
2681 
2682       while (!reg_allocate()) {
2683          if (failed)
2684             return false;
2685       }
2686 
2687       /* We want to run this after spilling because 64-bit (un)spills need to
2688        * emit code to shuffle 64-bit data for the 32-bit scratch read/write
2689        * messages that can produce unsupported 64-bit swizzle regions.
2690        */
2691       OPT(scalarize_df);
2692    }
2693 
2694    opt_schedule_instructions();
2695 
2696    opt_set_dependency_control();
2697 
2698    convert_to_hw_regs();
2699 
2700    if (last_scratch > 0) {
2701       prog_data->base.total_scratch =
2702          brw_get_scratch_size(last_scratch * REG_SIZE);
2703    }
2704 
2705    return !failed;
2706 }
2707 
2708 } /* namespace brw */
2709 
2710 extern "C" {
2711 
2712 /**
2713  * Compile a vertex shader.
2714  *
2715  * Returns the final assembly and the program's size.
2716  */
2717 const unsigned *
2718 brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
2719                void *mem_ctx,
2720                const struct brw_vs_prog_key *key,
2721                struct brw_vs_prog_data *prog_data,
2722                const nir_shader *src_shader,
2723                gl_clip_plane *clip_planes,
2724                bool use_legacy_snorm_formula,
2725                int shader_time_index,
2726                unsigned *final_assembly_size,
2727                char **error_str)
2728 {
2729    const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
2730    nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);
2731    shader = brw_nir_apply_sampler_key(shader, compiler, &key->tex, is_scalar);
2732    brw_nir_lower_vs_inputs(shader, is_scalar,
2733                            use_legacy_snorm_formula, key->gl_attrib_wa_flags);
2734    brw_nir_lower_vue_outputs(shader, is_scalar);
2735    shader = brw_postprocess_nir(shader, compiler, is_scalar);
2736 
2737    const unsigned *assembly = NULL;
2738 
2739    prog_data->base.clip_distance_mask =
2740       ((1 << shader->info->clip_distance_array_size) - 1);
2741    prog_data->base.cull_distance_mask =
2742       ((1 << shader->info->cull_distance_array_size) - 1) <<
2743       shader->info->clip_distance_array_size;
2744 
2745    unsigned nr_attribute_slots = _mesa_bitcount_64(prog_data->inputs_read);
2746 
2747    /* gl_VertexID and gl_InstanceID are system values, but arrive via an
2748     * incoming vertex attribute.  So, add an extra slot.
2749     */
2750    if (shader->info->system_values_read &
2751        (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
2752         BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
2753         BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
2754         BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID))) {
2755       nr_attribute_slots++;
2756    }
2757 
2758    /* gl_DrawID has its very own vec4 */
2759    if (shader->info->system_values_read &
2760        BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
2761       nr_attribute_slots++;
2762    }
2763 
2764    unsigned nr_attributes = nr_attribute_slots -
2765       DIV_ROUND_UP(_mesa_bitcount_64(shader->info->double_inputs_read), 2);
2766 
2767    /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
2768     * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
2769     * vec4 mode, the hardware appears to wedge unless we read something.
2770     */
2771    if (is_scalar)
2772       prog_data->base.urb_read_length =
2773          DIV_ROUND_UP(nr_attribute_slots, 2);
2774    else
2775       prog_data->base.urb_read_length =
2776          DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);
2777 
2778    prog_data->nr_attributes = nr_attributes;
2779    prog_data->nr_attribute_slots = nr_attribute_slots;
2780 
2781    /* Since vertex shaders reuse the same VUE entry for inputs and outputs
2782     * (overwriting the original contents), we need to make sure the size is
2783     * the larger of the two.
2784     */
2785    const unsigned vue_entries =
2786       MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);
2787 
2788    if (compiler->devinfo->gen == 6)
2789       prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
2790    else
2791       prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
2792 
2793    if (INTEL_DEBUG & DEBUG_VS) {
2794       fprintf(stderr, "VS Output ");
2795       brw_print_vue_map(stderr, &prog_data->base.vue_map);
2796    }
2797 
2798    if (is_scalar) {
2799       prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
2800 
2801       fs_visitor v(compiler, log_data, mem_ctx, key, &prog_data->base.base,
2802                    NULL, /* prog; Only used for TEXTURE_RECTANGLE on gen < 8 */
2803                    shader, 8, shader_time_index);
2804       if (!v.run_vs(clip_planes)) {
2805          if (error_str)
2806             *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
2807 
2808          return NULL;
2809       }
2810 
2811       prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
2812 
2813       fs_generator g(compiler, log_data, mem_ctx, (void *) key,
2814                      &prog_data->base.base, v.promoted_constants,
2815                      v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
2816       if (INTEL_DEBUG & DEBUG_VS) {
2817          const char *debug_name =
2818             ralloc_asprintf(mem_ctx, "%s vertex shader %s",
2819                             shader->info->label ? shader->info->label :
2820                                "unnamed",
2821                             shader->info->name);
2822 
2823          g.enable_debug(debug_name);
2824       }
2825       g.generate_code(v.cfg, 8);
2826       assembly = g.get_assembly(final_assembly_size);
2827    }
2828 
2829    if (!assembly) {
2830       prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
2831 
2832       vec4_vs_visitor v(compiler, log_data, key, prog_data,
2833                         shader, clip_planes, mem_ctx,
2834                         shader_time_index, use_legacy_snorm_formula);
2835       if (!v.run()) {
2836          if (error_str)
2837             *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
2838 
2839          return NULL;
2840       }
2841 
2842       assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
2843                                             shader, &prog_data->base, v.cfg,
2844                                             final_assembly_size);
2845    }
2846 
2847    return assembly;
2848 }
2849 
2850 } /* extern "C" */
2851