1 /*
2  * Copyright © 2018 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "aco_builder.h"
26 #include "aco_ir.h"
27 
28 #include "common/sid.h"
29 
30 #include <map>
31 #include <vector>
32 
33 namespace aco {
34 
35 struct lower_context {
36    Program* program;
37    Block* block;
38    std::vector<aco_ptr<Instruction>> instructions;
39 };
40 
41 /* used by handle_operands() indirectly through Builder::copy */
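/* Each pair (int8_mul_table[2 * v], int8_mul_table[2 * v + 1]) holds values that sign-extend
 * to VALU inline constants (-16..64) and whose product's low 8 bits equal v, so copy_constant()
 * below can rebuild an 8-bit literal with a single v_mul_u32_u24. */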
42 uint8_t int8_mul_table[512] = {
43    0, 20,  1,  1,   1,  2,   1,  3,   1,  4,   1, 5,   1,  6,   1,  7,   1,  8,   1,  9,
44    1, 10,  1,  11,  1,  12,  1,  13,  1,  14,  1, 15,  1,  16,  1,  17,  1,  18,  1,  19,
45    1, 20,  1,  21,  1,  22,  1,  23,  1,  24,  1, 25,  1,  26,  1,  27,  1,  28,  1,  29,
46    1, 30,  1,  31,  1,  32,  1,  33,  1,  34,  1, 35,  1,  36,  1,  37,  1,  38,  1,  39,
47    1, 40,  1,  41,  1,  42,  1,  43,  1,  44,  1, 45,  1,  46,  1,  47,  1,  48,  1,  49,
48    1, 50,  1,  51,  1,  52,  1,  53,  1,  54,  1, 55,  1,  56,  1,  57,  1,  58,  1,  59,
49    1, 60,  1,  61,  1,  62,  1,  63,  1,  64,  5, 13,  2,  33,  17, 19,  2,  34,  3,  23,
50    2, 35,  11, 53,  2,  36,  7,  47,  2,  37,  3, 25,  2,  38,  7,  11,  2,  39,  53, 243,
51    2, 40,  3,  27,  2,  41,  17, 35,  2,  42,  5, 17,  2,  43,  3,  29,  2,  44,  15, 23,
52    2, 45,  7,  13,  2,  46,  3,  31,  2,  47,  5, 19,  2,  48,  19, 59,  2,  49,  3,  33,
53    2, 50,  7,  51,  2,  51,  15, 41,  2,  52,  3, 35,  2,  53,  11, 33,  2,  54,  23, 27,
54    2, 55,  3,  37,  2,  56,  9,  41,  2,  57,  5, 23,  2,  58,  3,  39,  2,  59,  7,  17,
55    2, 60,  9,  241, 2,  61,  3,  41,  2,  62,  5, 25,  2,  63,  35, 245, 2,  64,  3,  43,
56    5, 26,  9,  43,  3,  44,  7,  19,  10, 39,  3, 45,  4,  34,  11, 59,  3,  46,  9,  243,
57    4, 35,  3,  47,  22, 53,  7,  57,  3,  48,  5, 29,  10, 245, 3,  49,  4,  37,  9,  45,
58    3, 50,  7,  241, 4,  38,  3,  51,  7,  22,  5, 31,  3,  52,  7,  59,  7,  242, 3,  53,
59    4, 40,  7,  23,  3,  54,  15, 45,  4,  41,  3, 55,  6,  241, 9,  47,  3,  56,  13, 13,
60    5, 34,  3,  57,  4,  43,  11, 39,  3,  58,  5, 35,  4,  44,  3,  59,  6,  243, 7,  245,
61    3, 60,  5,  241, 7,  26,  3,  61,  4,  46,  5, 37,  3,  62,  11, 17,  4,  47,  3,  63,
62    5, 38,  5,  243, 3,  64,  7,  247, 9,  50,  5, 39,  4,  241, 33, 37,  6,  33,  13, 35,
63    4, 242, 5,  245, 6,  247, 7,  29,  4,  51,  5, 41,  5,  246, 7,  249, 3,  240, 11, 19,
64    5, 42,  3,  241, 4,  245, 25, 29,  3,  242, 5, 43,  4,  246, 3,  243, 17, 58,  17, 43,
65    3, 244, 5,  249, 6,  37,  3,  245, 2,  240, 5, 45,  2,  241, 21, 23,  2,  242, 3,  247,
66    2, 243, 5,  251, 2,  244, 29, 61,  2,  245, 3, 249, 2,  246, 17, 29,  2,  247, 9,  55,
67    1, 240, 1,  241, 1,  242, 1,  243, 1,  244, 1, 245, 1,  246, 1,  247, 1,  248, 1,  249,
68    1, 250, 1,  251, 1,  252, 1,  253, 1,  254, 1, 255};
69 
70 aco_opcode
71 get_reduce_opcode(amd_gfx_level gfx_level, ReduceOp op)
72 {
73    /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
74     * 32-bit opcodes (VOP2), which allows us to remove the temporary VGPR and
75     * to use DPP with the arithmetic instructions. This requires sign-extension.
76     */
77    switch (op) {
78    case iadd8:
79    case iadd16:
80       if (gfx_level >= GFX10) {
81          return aco_opcode::v_add_u32;
82       } else if (gfx_level >= GFX8) {
83          return aco_opcode::v_add_u16;
84       } else {
85          return aco_opcode::v_add_co_u32;
86       }
87       break;
88    case imul8:
89    case imul16:
90       if (gfx_level >= GFX10) {
91          return aco_opcode::v_mul_lo_u16_e64;
92       } else if (gfx_level >= GFX8) {
93          return aco_opcode::v_mul_lo_u16;
94       } else {
95          return aco_opcode::v_mul_u32_u24;
96       }
97       break;
98    case fadd16: return aco_opcode::v_add_f16;
99    case fmul16: return aco_opcode::v_mul_f16;
100    case imax8:
101    case imax16:
102       if (gfx_level >= GFX10) {
103          return aco_opcode::v_max_i32;
104       } else if (gfx_level >= GFX8) {
105          return aco_opcode::v_max_i16;
106       } else {
107          return aco_opcode::v_max_i32;
108       }
109       break;
110    case imin8:
111    case imin16:
112       if (gfx_level >= GFX10) {
113          return aco_opcode::v_min_i32;
114       } else if (gfx_level >= GFX8) {
115          return aco_opcode::v_min_i16;
116       } else {
117          return aco_opcode::v_min_i32;
118       }
119       break;
120    case umin8:
121    case umin16:
122       if (gfx_level >= GFX10) {
123          return aco_opcode::v_min_u32;
124       } else if (gfx_level >= GFX8) {
125          return aco_opcode::v_min_u16;
126       } else {
127          return aco_opcode::v_min_u32;
128       }
129       break;
130    case umax8:
131    case umax16:
132       if (gfx_level >= GFX10) {
133          return aco_opcode::v_max_u32;
134       } else if (gfx_level >= GFX8) {
135          return aco_opcode::v_max_u16;
136       } else {
137          return aco_opcode::v_max_u32;
138       }
139       break;
140    case fmin16: return aco_opcode::v_min_f16;
141    case fmax16: return aco_opcode::v_max_f16;
142    case iadd32: return gfx_level >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
143    case imul32: return aco_opcode::v_mul_lo_u32;
144    case fadd32: return aco_opcode::v_add_f32;
145    case fmul32: return aco_opcode::v_mul_f32;
146    case imax32: return aco_opcode::v_max_i32;
147    case imin32: return aco_opcode::v_min_i32;
148    case umin32: return aco_opcode::v_min_u32;
149    case umax32: return aco_opcode::v_max_u32;
150    case fmin32: return aco_opcode::v_min_f32;
151    case fmax32: return aco_opcode::v_max_f32;
152    case iand8:
153    case iand16:
154    case iand32: return aco_opcode::v_and_b32;
155    case ixor8:
156    case ixor16:
157    case ixor32: return aco_opcode::v_xor_b32;
158    case ior8:
159    case ior16:
160    case ior32: return aco_opcode::v_or_b32;
161    case iadd64: return aco_opcode::num_opcodes;
162    case imul64: return aco_opcode::num_opcodes;
163    case fadd64: return aco_opcode::v_add_f64;
164    case fmul64: return aco_opcode::v_mul_f64;
165    case imin64: return aco_opcode::num_opcodes;
166    case imax64: return aco_opcode::num_opcodes;
167    case umin64: return aco_opcode::num_opcodes;
168    case umax64: return aco_opcode::num_opcodes;
169    case fmin64: return aco_opcode::v_min_f64;
170    case fmax64: return aco_opcode::v_max_f64;
171    case iand64: return aco_opcode::num_opcodes;
172    case ior64: return aco_opcode::num_opcodes;
173    case ixor64: return aco_opcode::num_opcodes;
174    default: return aco_opcode::num_opcodes;
175    }
176 }
177 
178 bool
179 is_vop3_reduce_opcode(aco_opcode opcode)
180 {
181    /* 64-bit reductions are VOP3. */
182    if (opcode == aco_opcode::num_opcodes)
183       return true;
184 
185    return instr_info.format[(int)opcode] == Format::VOP3;
186 }
187 
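/* Emit a 32-bit VGPR addition via Builder::vadd32; if the created instruction carries a
 * carry-out definition, pin it to vcc. */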
188 void
189 emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
190 {
191    Instruction* instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
192    if (instr->definitions.size() >= 2) {
193       assert(instr->definitions[1].regClass() == bld.lm);
194       instr->definitions[1].setFixed(vcc);
195    }
196 }
197 
198 void
199 emit_int64_dpp_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
200                   PhysReg vtmp_reg, ReduceOp op, unsigned dpp_ctrl, unsigned row_mask,
201                   unsigned bank_mask, bool bound_ctrl, Operand* identity = NULL)
202 {
203    Builder bld(ctx->program, &ctx->instructions);
204    Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg + 1}, v1)};
205    Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg + 1}, v1)};
206    Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg + 1}, v1)};
207    Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg + 1}, v1)};
208    Operand src1_64 = Operand(src1_reg, v2);
209    Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg + 1}, v1)};
210    Operand vtmp_op64 = Operand(vtmp_reg, v2);
211    if (op == iadd64) {
212       if (ctx->program->gfx_level >= GFX10) {
213          if (identity)
214             bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
215          bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
216                       bound_ctrl);
217          bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
218       } else {
219          bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
220                       dpp_ctrl, row_mask, bank_mask, bound_ctrl);
221       }
222       bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1],
223                    Operand(vcc, bld.lm), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
224    } else if (op == iand64) {
225       bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
226                    bound_ctrl);
227       bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
228                    bound_ctrl);
229    } else if (op == ior64) {
230       bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
231                    bound_ctrl);
232       bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
233                    bound_ctrl);
234    } else if (op == ixor64) {
235       bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
236                    bound_ctrl);
237       bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
238                    bound_ctrl);
239    } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
240       aco_opcode cmp = aco_opcode::num_opcodes;
241       switch (op) {
242       case umin64: cmp = aco_opcode::v_cmp_gt_u64; break;
243       case umax64: cmp = aco_opcode::v_cmp_lt_u64; break;
244       case imin64: cmp = aco_opcode::v_cmp_gt_i64; break;
245       case imax64: cmp = aco_opcode::v_cmp_lt_i64; break;
246       default: break;
247       }
248 
249       if (identity) {
250          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
251          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
252       }
253       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
254                    bound_ctrl);
255       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1], dpp_ctrl, row_mask, bank_mask,
256                    bound_ctrl);
257 
258       bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
259       bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
260       bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
261    } else if (op == imul64) {
262       /* t4 = dpp(x_hi)
263        * t1 = umul_lo(t4, y_lo)
264        * t3 = dpp(x_lo)
265        * t0 = umul_lo(t3, y_hi)
266        * t2 = iadd(t0, t1)
267        * t5 = umul_hi(t3, y_lo)
268        * res_hi = iadd(t2, t5)
269        * res_lo = umul_lo(t3, y_lo)
270        * Requires that res_hi != src0[0] and res_hi != src1[0]
271        * and that vtmp[0] != res_hi.
272        */
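      /* The steps follow from splitting each operand into 32-bit halves:
       * (x_lo + 2^32 * x_hi) * (y_lo + 2^32 * y_hi) mod 2^64
       *    = lo(x_lo * y_lo)
       *    + 2^32 * (hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)).
       */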
273       if (identity)
274          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
275       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1], dpp_ctrl, row_mask, bank_mask,
276                    bound_ctrl);
277       bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
278       if (identity)
279          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
280       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
281                    bound_ctrl);
282       bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
283       emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
284       if (identity)
285          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
286       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
287                    bound_ctrl);
288       bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
289       emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
290       if (identity)
291          bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
292       bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
293                    bound_ctrl);
294       bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
295    }
296 }
297 
298 void
299 emit_int64_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
300               ReduceOp op)
301 {
302    Builder bld(ctx->program, &ctx->instructions);
303    Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg + 1}, v1)};
304    RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
305    Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg + 1}, src0_rc)};
306    Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg + 1}, v1)};
307    Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
308    Operand src1_64 = Operand(src1_reg, v2);
309 
310    if (src0_rc == s1 &&
311        (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
312       assert(vtmp.reg() != 0);
313       bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
314       bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), src0[1]);
315       src0_reg = vtmp;
316       src0[0] = Operand(vtmp, v1);
317       src0[1] = Operand(PhysReg{vtmp + 1}, v1);
318       src0_64 = Operand(vtmp, v2);
319    } else if (src0_rc == s1 && op == iadd64) {
320       assert(vtmp.reg() != 0);
321       bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), src0[1]);
322       src0[1] = Operand(PhysReg{vtmp + 1}, v1);
323    }
324 
325    if (op == iadd64) {
326       if (ctx->program->gfx_level >= GFX10) {
327          bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
328       } else {
329          bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
330       }
331       bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1],
332                Operand(vcc, bld.lm));
333    } else if (op == iand64) {
334       bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
335       bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
336    } else if (op == ior64) {
337       bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
338       bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
339    } else if (op == ixor64) {
340       bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
341       bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
342    } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
343       aco_opcode cmp = aco_opcode::num_opcodes;
344       switch (op) {
345       case umin64: cmp = aco_opcode::v_cmp_gt_u64; break;
346       case umax64: cmp = aco_opcode::v_cmp_lt_u64; break;
347       case imin64: cmp = aco_opcode::v_cmp_gt_i64; break;
348       case imax64: cmp = aco_opcode::v_cmp_lt_i64; break;
349       default: break;
350       }
351 
352       bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
353       bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
354       bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
355    } else if (op == imul64) {
356       if (src1_reg == dst_reg) {
357          /* it's fine if src0==dst but not if src1==dst */
358          std::swap(src0_reg, src1_reg);
359          std::swap(src0[0], src1[0]);
360          std::swap(src0[1], src1[1]);
361          std::swap(src0_64, src1_64);
362       }
363       assert(!(src0_reg == src1_reg));
364       /* t1 = umul_lo(x_hi, y_lo)
365        * t0 = umul_lo(x_lo, y_hi)
366        * t2 = iadd(t0, t1)
367        * t5 = umul_hi(x_lo, y_lo)
368        * res_hi = iadd(t2, t5)
369        * res_lo = umul_lo(x_lo, y_lo)
370        * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
371        */
372       Definition tmp0_def(PhysReg{src0_reg + 1}, v1);
373       Definition tmp1_def(PhysReg{src1_reg + 1}, v1);
374       Operand tmp0_op = src0[1];
375       Operand tmp1_op = src1[1];
376       bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
377       bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
378       emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
379       bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
380       emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
381       bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
382    }
383 }
384 
385 void
386 emit_dpp_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
387             ReduceOp op, unsigned size, unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask,
388             bool bound_ctrl, Operand* identity = NULL) /* for VOP3 with sparse writes */
389 {
390    Builder bld(ctx->program, &ctx->instructions);
391    RegClass rc = RegClass(RegType::vgpr, size);
392    Definition dst(dst_reg, rc);
393    Operand src0(src0_reg, rc);
394    Operand src1(src1_reg, rc);
395 
396    aco_opcode opcode = get_reduce_opcode(ctx->program->gfx_level, op);
397    bool vop3 = is_vop3_reduce_opcode(opcode);
398 
399    if (!vop3) {
400       if (opcode == aco_opcode::v_add_co_u32)
401          bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask,
402                       bound_ctrl);
403       else
404          bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
405       return;
406    }
407 
408    if (opcode == aco_opcode::num_opcodes) {
409       emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op, dpp_ctrl, row_mask, bank_mask,
410                         bound_ctrl, identity);
411       return;
412    }
413 
414    if (identity)
415       bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
416    if (identity && size >= 2)
417       bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), identity[1]);
418 
419    for (unsigned i = 0; i < size; i++)
420       bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
421                    Operand(PhysReg{src0_reg + i}, v1), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
422 
423    bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
424 }
425 
426 void
427 emit_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
428         ReduceOp op, unsigned size)
429 {
430    Builder bld(ctx->program, &ctx->instructions);
431    RegClass rc = RegClass(RegType::vgpr, size);
432    Definition dst(dst_reg, rc);
433    Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
434    Operand src1(src1_reg, rc);
435 
436    aco_opcode opcode = get_reduce_opcode(ctx->program->gfx_level, op);
437    bool vop3 = is_vop3_reduce_opcode(opcode);
438 
439    if (opcode == aco_opcode::num_opcodes) {
440       emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
441       return;
442    }
443 
444    if (vop3) {
445       bld.vop3(opcode, dst, src0, src1);
446    } else if (opcode == aco_opcode::v_add_co_u32) {
447       bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
448    } else {
449       bld.vop2(opcode, dst, src0, src1);
450    }
451 }
452 
453 void
454 emit_dpp_mov(lower_context* ctx, PhysReg dst, PhysReg src0, unsigned size, unsigned dpp_ctrl,
455              unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
456 {
457    Builder bld(ctx->program, &ctx->instructions);
458    for (unsigned i = 0; i < size; i++) {
459       bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst + i}, v1),
460                    Operand(PhysReg{src0 + i}, v1), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
461    }
462 }
463 
464 void
465 emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
466 {
467    for (unsigned i = 0; i < size; i++) {
468       bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst + i}, v1),
469              Operand(PhysReg{src + i}, v1), ds_pattern);
470    }
471 }
472 
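/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan pseudo-instructions.
 * tmp and vtmp are VGPR scratch registers, stmp receives the saved exec mask,
 * and sitmp is SGPR scratch for readlane/writelane results. */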
473 void
474 emit_reduction(lower_context* ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size,
475                PhysReg tmp, PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
476 {
477    assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
478    assert(cluster_size <= ctx->program->wave_size);
479 
480    Builder bld(ctx->program, &ctx->instructions);
481 
482    Operand identity[2];
483    identity[0] = Operand::c32(get_reduction_identity(reduce_op, 0));
484    identity[1] = Operand::c32(get_reduction_identity(reduce_op, 1));
485    Operand vcndmask_identity[2] = {identity[0], identity[1]};
486 
487    /* First, copy the source to tmp and set inactive lanes to the identity */
488    bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1),
489             Definition(exec, bld.lm), Operand::c64(UINT64_MAX), Operand(exec, bld.lm));
490 
491    for (unsigned i = 0; i < src.size(); i++) {
492       /* p_exclusive_scan needs it to be an sgpr or inline constant for the v_writelane_b32,
493        * except on GFX10, where v_writelane_b32 can take a literal. */
494       if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan &&
495           ctx->program->gfx_level < GFX10) {
496          bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp + i}, s1), identity[i]);
497          identity[i] = Operand(PhysReg{sitmp + i}, s1);
498 
499          bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp + i}, v1), identity[i]);
500          vcndmask_identity[i] = Operand(PhysReg{tmp + i}, v1);
501       } else if (identity[i].isLiteral()) {
502          bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp + i}, v1), identity[i]);
503          vcndmask_identity[i] = Operand(PhysReg{tmp + i}, v1);
504       }
505    }
506 
507    for (unsigned i = 0; i < src.size(); i++) {
508       bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
509                    vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
510                    Operand(stmp, bld.lm));
511    }
512 
513    if (src.regClass() == v1b) {
514       if (ctx->program->gfx_level >= GFX8 && ctx->program->gfx_level < GFX11) {
515          aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
516             aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
517          sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
518          sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
519          bool sext = reduce_op == imin8 || reduce_op == imax8;
520          sdwa->sel[0] = SubdwordSel(1, 0, sext);
521          sdwa->dst_sel = SubdwordSel::dword;
522          bld.insert(std::move(sdwa));
523       } else {
524          aco_opcode opcode;
525 
526          if (reduce_op == imin8 || reduce_op == imax8)
527             opcode = aco_opcode::v_bfe_i32;
528          else
529             opcode = aco_opcode::v_bfe_u32;
530 
531          bld.vop3(opcode, Definition(PhysReg{tmp}, v1), Operand(PhysReg{tmp}, v1), Operand::zero(),
532                   Operand::c32(8u));
533       }
534    } else if (src.regClass() == v2b) {
535       bool is_add_cmp = reduce_op == iadd16 || reduce_op == imax16 || reduce_op == imin16 ||
536                         reduce_op == umin16 || reduce_op == umax16;
537       if (ctx->program->gfx_level >= GFX10 && ctx->program->gfx_level < GFX11 && is_add_cmp) {
538          aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
539             aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
540          sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
541          sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
542          bool sext = reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16;
543          sdwa->sel[0] = SubdwordSel(2, 0, sext);
544          sdwa->dst_sel = SubdwordSel::dword;
545          bld.insert(std::move(sdwa));
546       } else if (ctx->program->gfx_level <= GFX7 ||
547                  (ctx->program->gfx_level >= GFX11 && is_add_cmp)) {
548          aco_opcode opcode;
549 
550          if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
551             opcode = aco_opcode::v_bfe_i32;
552          else
553             opcode = aco_opcode::v_bfe_u32;
554 
555          bld.vop3(opcode, Definition(PhysReg{tmp}, v1), Operand(PhysReg{tmp}, v1), Operand::zero(),
556                   Operand::c32(16u));
557       }
558    }
559 
560    bool reduction_needs_last_op = false;
561    switch (op) {
562    case aco_opcode::p_reduce:
563       if (cluster_size == 1)
564          break;
565 
566       if (ctx->program->gfx_level <= GFX7) {
567          reduction_needs_last_op = true;
568          emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
569          if (cluster_size == 2)
570             break;
571          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
572          emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
573          if (cluster_size == 4)
574             break;
575          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
576          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
577          if (cluster_size == 8)
578             break;
579          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
580          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
581          if (cluster_size == 16)
582             break;
583          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
584          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
585          if (cluster_size == 32)
586             break;
587          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
588          for (unsigned i = 0; i < src.size(); i++)
589             bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),
590                          Operand::zero());
591          // TODO: it would be more effective to do the last reduction step on SALU
592          emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
593          reduction_needs_last_op = false;
594          break;
595       }
596 
597       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf,
598                   0xf, false);
599       if (cluster_size == 2)
600          break;
601       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf,
602                   0xf, false);
603       if (cluster_size == 4)
604          break;
605       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf,
606                   false);
607       if (cluster_size == 8)
608          break;
609       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
610       if (cluster_size == 16)
611          break;
612 
613       if (ctx->program->gfx_level >= GFX10) {
614          /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
615          for (unsigned i = 0; i < src.size(); i++)
616             bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
617                      Operand(PhysReg{tmp + i}, v1), Operand::zero(), Operand::zero());
618 
619          if (cluster_size == 32) {
620             reduction_needs_last_op = true;
621             break;
622          }
623 
624          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
625          for (unsigned i = 0; i < src.size(); i++)
626             bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),
627                          Operand::zero());
628          // TODO: it would be more effective to do the last reduction step on SALU
629          emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
630          break;
631       }
632 
633       if (cluster_size == 32) {
634          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
635          reduction_needs_last_op = true;
636          break;
637       }
638       assert(cluster_size == 64);
639       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf,
640                   false);
641       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf,
642                   false);
643       break;
644    case aco_opcode::p_exclusive_scan:
645       if (ctx->program->gfx_level >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
646          /* shift rows right */
647          emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
648 
649          /* fill in the gaps in rows 1 and 3 */
650          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x10000u));
651          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand::c32(0x10000u));
652          for (unsigned i = 0; i < src.size(); i++) {
653             Instruction* perm =
654                bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
655                         Operand(PhysReg{tmp + i}, v1), Operand::c32(0xffffffffu),
656                         Operand::c32(0xffffffffu))
657                   .instr;
658             perm->vop3().opsel = 1; /* FI (Fetch Inactive) */
659          }
660          bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand::c64(UINT64_MAX));
661 
662          if (ctx->program->wave_size == 64) {
663             /* fill in the gap in row 2 */
664             for (unsigned i = 0; i < src.size(); i++) {
665                bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
666                             Operand::c32(31u));
667                bld.writelane(Definition(PhysReg{vtmp + i}, v1), Operand(PhysReg{sitmp + i}, s1),
668                              Operand::c32(32u), Operand(PhysReg{vtmp + i}, v1));
669             }
670          }
671          std::swap(tmp, vtmp);
672       } else if (ctx->program->gfx_level >= GFX8) {
673          emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
674       } else {
675          // TODO: use LDS on CS with a single write and shifted read
676          /* wavefront shift_right by 1 on SI/CI */
677          emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
678          emit_ds_swizzle(bld, tmp, tmp, src.size(),
679                          ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
680          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x10101010u));
681          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
682          for (unsigned i = 0; i < src.size(); i++)
683             bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
684                      Operand(PhysReg{tmp + i}, v1));
685 
686          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
687          emit_ds_swizzle(bld, tmp, tmp, src.size(),
688                          ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
689          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x01000100u));
690          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
691          for (unsigned i = 0; i < src.size(); i++)
692             bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
693                      Operand(PhysReg{tmp + i}, v1));
694 
695          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
696          emit_ds_swizzle(bld, tmp, tmp, src.size(),
697                          ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
698          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(1u),
699                   Operand::c32(16u));
700          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(1u),
701                   Operand::c32(16u));
702          for (unsigned i = 0; i < src.size(); i++)
703             bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
704                      Operand(PhysReg{tmp + i}, v1));
705 
706          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
707          for (unsigned i = 0; i < src.size(); i++) {
708             bld.writelane(Definition(PhysReg{vtmp + i}, v1), identity[i], Operand::zero(),
709                           Operand(PhysReg{vtmp + i}, v1));
710             bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
711                          Operand::zero());
712             bld.writelane(Definition(PhysReg{vtmp + i}, v1), Operand(PhysReg{sitmp + i}, s1),
713                           Operand::c32(32u), Operand(PhysReg{vtmp + i}, v1));
714             identity[i] = Operand::zero(); /* prevent further uses of identity */
715          }
716          std::swap(tmp, vtmp);
717       }
718 
719       for (unsigned i = 0; i < src.size(); i++) {
720          if (!identity[i].isConstant() ||
721              identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
722             if (ctx->program->gfx_level < GFX10)
723                assert((identity[i].isConstant() && !identity[i].isLiteral()) ||
724                       identity[i].physReg() == PhysReg{sitmp + i});
725             bld.writelane(Definition(PhysReg{tmp + i}, v1), identity[i], Operand::zero(),
726                           Operand(PhysReg{tmp + i}, v1));
727          }
728       }
729       FALLTHROUGH;
730    case aco_opcode::p_inclusive_scan:
731       assert(cluster_size == ctx->program->wave_size);
732       if (ctx->program->gfx_level <= GFX7) {
733          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
734          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xAAAAAAAAu));
735          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
736          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
737 
738          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
739          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
740          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xCCCCCCCCu));
741          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
742          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
743 
744          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
745          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
746          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xF0F0F0F0u));
747          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
748          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
749 
750          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
751          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
752          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xFF00FF00u));
753          bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
754          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
755 
756          bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
757          emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
758          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(16u),
759                   Operand::c32(16u));
760          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(16u),
761                   Operand::c32(16u));
762          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
763 
764          for (unsigned i = 0; i < src.size(); i++)
765             bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
766                          Operand::c32(31u));
767          bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u),
768                   Operand::c32(32u));
769          emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
770          break;
771       }
772 
773       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(1), 0xf, 0xf, false,
774                   identity);
775       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(2), 0xf, 0xf, false,
776                   identity);
777       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(4), 0xf, 0xf, false,
778                   identity);
779       emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(8), 0xf, 0xf, false,
780                   identity);
781       if (ctx->program->gfx_level >= GFX10) {
782          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(16u),
783                   Operand::c32(16u));
784          bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(16u),
785                   Operand::c32(16u));
786          for (unsigned i = 0; i < src.size(); i++) {
787             Instruction* perm =
788                bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
789                         Operand(PhysReg{tmp + i}, v1), Operand::c32(0xffffffffu),
790                         Operand::c32(0xffffffffu))
791                   .instr;
792             perm->vop3().opsel = 1; /* FI (Fetch Inactive) */
793          }
794          emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
795 
796          if (ctx->program->wave_size == 64) {
797             bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u),
798                      Operand::c32(32u));
799             for (unsigned i = 0; i < src.size(); i++)
800                bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
801                             Operand::c32(31u));
802             emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
803          }
804       } else {
805          emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf,
806                      false, identity);
807          emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf,
808                      false, identity);
809       }
810       break;
811    default: unreachable("Invalid reduction mode");
812    }
813 
814    if (op == aco_opcode::p_reduce) {
815       if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
816          bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
817          emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
818          return;
819       }
820 
821       if (reduction_needs_last_op)
822          emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
823    }
824 
825    /* restore exec */
826    bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
827 
828    if (dst.regClass().type() == RegType::sgpr) {
829       for (unsigned k = 0; k < src.size(); k++) {
830          bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1), Operand(PhysReg{tmp + k}, v1),
831                       Operand::c32(ctx->program->wave_size - 1));
832       }
833    } else if (dst.physReg() != tmp) {
834       for (unsigned k = 0; k < src.size(); k++) {
835          bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
836                   Operand(PhysReg{tmp + k}, v1));
837       }
838    }
839 }
840 
841 void
842 emit_gfx10_wave64_bpermute(Program* program, aco_ptr<Instruction>& instr, Builder& bld)
843 {
844    /* Emulates proper bpermute on GFX10 in wave64 mode.
845     *
846     * This is necessary because on GFX10 the bpermute instruction only works
847     * on half waves (you can think of it as having a cluster size of 32), so we
848     * manually swap the data between the two halves using two shared VGPRs.
849     */
850 
851    assert(program->gfx_level >= GFX10);
852    assert(program->wave_size == 64);
853 
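   /* PhysReg numbers of 256 and above denote VGPRs, so the two shared VGPRs used for the
    * half-wave data exchange sit just past the wave's allocated VGPRs (rounded up to 4). */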
854    unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
855    Definition dst = instr->definitions[0];
856    Definition tmp_exec = instr->definitions[1];
857    Definition clobber_scc = instr->definitions[2];
858    Operand index_x4 = instr->operands[0];
859    Operand input_data = instr->operands[1];
860    Operand same_half = instr->operands[2];
861 
862    assert(dst.regClass() == v1);
863    assert(tmp_exec.regClass() == bld.lm);
864    assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
865    assert(same_half.regClass() == bld.lm);
866    assert(index_x4.regClass() == v1);
867    assert(input_data.regClass().type() == RegType::vgpr);
868    assert(input_data.bytes() <= 4);
869    assert(dst.physReg() != index_x4.physReg());
870    assert(dst.physReg() != input_data.physReg());
871    assert(tmp_exec.physReg() != same_half.physReg());
872 
873    PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
874    PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);
875 
876    /* Permute the input within the same half-wave */
877    bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);
878 
879    /* HI: Copy data from high lanes 32-63 to shared vgpr */
880    bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data,
881                 dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
882    /* Save EXEC */
883    bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
884    /* Set EXEC to enable LO lanes only */
885    bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u), Operand::zero());
886    /* LO: Copy data from low lanes 0-31 to shared vgpr */
887    bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
888    /* LO: bpermute shared vgpr (high lanes' data) */
889    bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4,
890           Operand(shared_vgpr_hi, v1));
891    /* Set EXEC to enable HI lanes only */
892    bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u), Operand::c32(32u));
893    /* HI: bpermute shared vgpr (low lanes' data) */
894    bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4,
895           Operand(shared_vgpr_lo, v1));
896 
897    /* Only enable lanes which use the other half's data */
898    bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc,
899             Operand(tmp_exec.physReg(), s2), same_half);
900    /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
901    bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3),
902                 0x3, 0xf, false);
903    /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
904    bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3),
905                 0xc, 0xf, false);
906 
907    /* Restore saved EXEC */
908    bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));
909 
910    /* RA assumes that the result is always in the low part of the register, so we have to
911     * shift if it's not there already. */
912    if (input_data.physReg().byte()) {
913       unsigned right_shift = input_data.physReg().byte() * 8;
914       bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand::c32(right_shift),
915                Operand(dst.physReg(), v1));
916    }
917 }
918 
919 void
920 emit_gfx6_bpermute(Program* program, aco_ptr<Instruction>& instr, Builder& bld)
921 {
922    /* Emulates bpermute using readlane instructions */
923 
924    Operand index = instr->operands[0];
925    Operand input = instr->operands[1];
926    Definition dst = instr->definitions[0];
927    Definition temp_exec = instr->definitions[1];
928    Definition clobber_vcc = instr->definitions[2];
929 
930    assert(dst.regClass() == v1);
931    assert(temp_exec.regClass() == bld.lm);
932    assert(clobber_vcc.regClass() == bld.lm);
933    assert(clobber_vcc.physReg() == vcc);
934    assert(index.regClass() == v1);
935    assert(index.physReg() != dst.physReg());
936    assert(input.regClass().type() == RegType::vgpr);
937    assert(input.bytes() <= 4);
938    assert(input.physReg() != dst.physReg());
939 
940    /* Save original EXEC */
941    bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));
942 
943    /* An "unrolled loop" that is executed per each lane.
944     * This takes only a few instructions per lane, as opposed to a "real" loop
945     * with branching, where the branch instruction alone would take 16+ cycles.
946     */
947    for (unsigned n = 0; n < program->wave_size; ++n) {
948       /* Activate the lane which has N for its source index */
949       bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand::c32(n),
950                index);
951       /* Read the data from lane N */
952       bld.readlane(Definition(vcc, s1), input, Operand::c32(n));
953       /* On the active lane, move the data we read from lane N to the destination VGPR */
954       bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
955       /* Restore original EXEC */
956       bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
957    }
958 }
959 
960 struct copy_operation {
961    Operand op;
962    Definition def;
963    unsigned bytes;
964    union {
965       uint8_t uses[8];
966       uint64_t is_used = 0;
967    };
968 };
969 
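/* Picks the largest power-of-two-sized, suitably aligned chunk of the copy starting at
 * offset and returns it as a def/op pair. */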
970 void
971 split_copy(lower_context* ctx, unsigned offset, Definition* def, Operand* op,
972            const copy_operation& src, bool ignore_uses, unsigned max_size)
973 {
974    PhysReg def_reg = src.def.physReg();
975    PhysReg op_reg = src.op.physReg();
976    def_reg.reg_b += offset;
977    op_reg.reg_b += offset;
978 
979    /* 64-bit VGPR copies (implemented with v_lshrrev_b64) are slow before GFX10 */
980    if (ctx->program->gfx_level < GFX10 && src.def.regClass().type() == RegType::vgpr)
981       max_size = MIN2(max_size, 4);
982    unsigned max_align = src.def.regClass().type() == RegType::vgpr ? 4 : 16;
983 
984    /* make sure the size is a power of two and reg % bytes == 0 */
985    unsigned bytes = 1;
986    for (; bytes <= max_size; bytes *= 2) {
987       unsigned next = bytes * 2u;
988       bool can_increase = def_reg.reg_b % MIN2(next, max_align) == 0 &&
989                           offset + next <= src.bytes && next <= max_size;
990       if (!src.op.isConstant() && can_increase)
991          can_increase = op_reg.reg_b % MIN2(next, max_align) == 0;
992       for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
993          can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
994       if (!can_increase)
995          break;
996    }
997 
998    *def = Definition(src.def.tempId(), def_reg, src.def.regClass().resize(bytes));
999    if (src.op.isConstant()) {
1000       assert(bytes >= 1 && bytes <= 8);
1001       uint64_t val = src.op.constantValue64() >> (offset * 8u);
1002       *op = Operand::get_const(ctx->program->gfx_level, val, bytes);
1003    } else {
1004       RegClass op_cls = src.op.regClass().resize(bytes);
1005       *op = Operand(op_reg, op_cls);
1006       op->setTemp(Temp(src.op.tempId(), op_cls));
1007    }
1008 }
1009 
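/* Returns a bit mask, relative to a_start, of the positions where [a_start, a_start + a_size)
 * overlaps [b_start, b_start + b_size). */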
1010 uint32_t
1011 get_intersection_mask(int a_start, int a_size, int b_start, int b_size)
1012 {
1013    int intersection_start = MAX2(b_start - a_start, 0);
1014    int intersection_end = MAX2(b_start + b_size - a_start, 0);
1015    if (intersection_start >= a_size || intersection_end == 0)
1016       return 0;
1017 
1018    uint32_t mask = u_bit_consecutive(0, a_size);
1019    return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
1020 }
1021 
1022 /* src1 are bytes 0-3. dst/src0 are bytes 4-7. */
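/* Each selector byte of swiz picks one byte from that 8-byte pool; src0 defaults to the
 * current contents of dst. */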
1023 void
1024 create_bperm(Builder& bld, uint8_t swiz[4], Definition dst, Operand src1,
1025              Operand src0 = Operand(v1))
1026 {
1027    uint32_t swiz_packed =
1028       swiz[0] | ((uint32_t)swiz[1] << 8) | ((uint32_t)swiz[2] << 16) | ((uint32_t)swiz[3] << 24);
1029 
1030    dst = Definition(PhysReg(dst.physReg().reg()), v1);
1031    if (!src1.isConstant())
1032       src1 = Operand(PhysReg(src1.physReg().reg()), v1);
1033    if (src0.isUndefined())
1034       src0 = Operand(dst.physReg(), v1);
1035    else if (!src0.isConstant())
1036       src0 = Operand(PhysReg(src0.physReg().reg()), v1);
1037    bld.vop3(aco_opcode::v_perm_b32, dst, src0, src1, Operand::c32(swiz_packed));
1038 }
1039 
1040 void
1041 copy_constant(lower_context* ctx, Builder& bld, Definition dst, Operand op)
1042 {
1043    assert(op.bytes() == dst.bytes());
1044 
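   /* Try to materialize 32-bit literals without spending a literal dword: s_movk_i32 for
    * sign-extendable 16-bit values, s_brev_b32/v_bfrev_b32 when the bit-reversed value is an
    * inline constant, and s_bfm_b32 for contiguous bit masks. */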
1045    if (dst.bytes() == 4 && op.isLiteral()) {
1046       uint32_t imm = op.constantValue();
1047       if (dst.regClass() == s1 && (imm >= 0xffff8000 || imm <= 0x7fff)) {
1048          bld.sopk(aco_opcode::s_movk_i32, dst, imm & 0xFFFFu);
1049          return;
1050       } else if (util_bitreverse(imm) <= 64 || util_bitreverse(imm) >= 0xFFFFFFF0) {
1051          uint32_t rev = util_bitreverse(imm);
1052          if (dst.regClass() == s1)
1053             bld.sop1(aco_opcode::s_brev_b32, dst, Operand::c32(rev));
1054          else
1055             bld.vop1(aco_opcode::v_bfrev_b32, dst, Operand::c32(rev));
1056          return;
1057       } else if (dst.regClass() == s1 && imm != 0) {
1058          unsigned start = (ffs(imm) - 1) & 0x1f;
1059          unsigned size = util_bitcount(imm) & 0x1f;
1060          if ((((1u << size) - 1u) << start) == imm) {
1061             bld.sop2(aco_opcode::s_bfm_b32, dst, Operand::c32(size), Operand::c32(start));
1062             return;
1063          }
1064       }
1065    }
1066 
1067    if (op.bytes() == 4 && op.constantEquals(0x3e22f983) && ctx->program->gfx_level >= GFX8)
1068       op.setFixed(PhysReg{248}); /* it can be an inline constant on GFX8+ */
1069 
1070    if (dst.regClass() == s1) {
1071       bld.sop1(aco_opcode::s_mov_b32, dst, op);
1072    } else if (dst.regClass() == s2) {
1073       /* s_ashr_i64 writes SCC, so we can't use it */
1074       assert(Operand::is_constant_representable(op.constantValue64(), 8, true, false));
1075       bld.sop1(aco_opcode::s_mov_b64, dst, op);
1076    } else if (dst.regClass() == v2) {
1077       if (Operand::is_constant_representable(op.constantValue64(), 8, true, false)) {
1078          bld.vop3(aco_opcode::v_lshrrev_b64, dst, Operand::zero(), op);
1079       } else {
1080          assert(Operand::is_constant_representable(op.constantValue64(), 8, false, true));
1081          bld.vop3(aco_opcode::v_ashrrev_i64, dst, Operand::zero(), op);
1082       }
1083    } else if (dst.regClass() == v1) {
1084       bld.vop1(aco_opcode::v_mov_b32, dst, op);
1085    } else {
1086       assert(dst.regClass() == v1b || dst.regClass() == v2b);
1087 
1088       bool use_sdwa = ctx->program->gfx_level >= GFX9 && ctx->program->gfx_level < GFX11;
1089       /* We need the v_perm_b32 (VOP3) to be able to take literals, and that's a GFX10+ feature. */
1090       bool can_use_perm = ctx->program->gfx_level >= GFX10 &&
1091                           (op.constantEquals(0) || op.constantEquals(0xff) ||
1092                            op.constantEquals(0xffff) || op.constantEquals(0xff00));
1093       if (dst.regClass() == v1b && use_sdwa) {
1094          uint8_t val = op.constantValue();
1095          Operand op32 = Operand::c32((uint32_t)val | (val & 0x80u ? 0xffffff00u : 0u));
1096          if (op32.isLiteral()) {
1097             uint32_t a = (uint32_t)int8_mul_table[val * 2];
1098             uint32_t b = (uint32_t)int8_mul_table[val * 2 + 1];
1099             bld.vop2_sdwa(aco_opcode::v_mul_u32_u24, dst,
1100                           Operand::c32(a | (a & 0x80u ? 0xffffff00u : 0x0u)),
1101                           Operand::c32(b | (b & 0x80u ? 0xffffff00u : 0x0u)));
1102          } else {
1103             bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, op32);
1104          }
1105       } else if (dst.regClass() == v2b && use_sdwa && !op.isLiteral()) {
1106          if (op.constantValue() >= 0xfff0 || op.constantValue() <= 64) {
1107             /* use v_mov_b32 to avoid possible issues with denormal flushing or
1108              * NaN. v_add_f16 is still needed for float constants. */
1109             uint32_t val32 = (int32_t)(int16_t)op.constantValue();
1110             bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, Operand::c32(val32));
1111          } else {
1112             bld.vop2_sdwa(aco_opcode::v_add_f16, dst, op, Operand::zero());
1113          }
1114       } else if (dst.regClass() == v2b && ctx->program->gfx_level >= GFX10 &&
1115                  (ctx->block->fp_mode.denorm16_64 & fp_denorm_keep_in)) {
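         /* v_pack_b32_f16 writes the whole dword, so re-pack the half that must stay
          * unchanged from its current contents while inserting the constant into the
          * other half */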
1116          if (dst.physReg().byte() == 2) {
1117             Operand def_lo(dst.physReg().advance(-2), v2b);
1118             Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, dst, def_lo, op);
1119             instr->vop3().opsel = 0;
1120          } else {
1121             assert(dst.physReg().byte() == 0);
1122             Operand def_hi(dst.physReg().advance(2), v2b);
1123             Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, dst, op, def_hi);
1124             instr->vop3().opsel = 2;
1125          }
1126       } else if (can_use_perm) {
1127          uint8_t swiz[] = {4, 5, 6, 7};
1128          swiz[dst.physReg().byte()] = op.constantValue() & 0xff ? bperm_255 : bperm_0;
1129          if (dst.bytes() == 2)
1130             swiz[dst.physReg().byte() + 1] = op.constantValue() >> 8 ? bperm_255 : bperm_0;
1131          create_bperm(bld, swiz, dst, Operand::zero());
1132       } else {
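         /* fallback: clear the destination bytes with v_and_b32 (skipped if the constant
          * is all ones there), then v_or_b32 the shifted constant into place (skipped if
          * it is zero) */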
1133          uint32_t offset = dst.physReg().byte() * 8u;
1134          uint32_t mask = ((1u << (dst.bytes() * 8)) - 1) << offset;
1135          uint32_t val = (op.constantValue() << offset) & mask;
1136          dst = Definition(PhysReg(dst.physReg().reg()), v1);
1137          Operand def_op(dst.physReg(), v1);
1138          if (val != mask)
1139             bld.vop2(aco_opcode::v_and_b32, dst, Operand::c32(~mask), def_op);
1140          if (val != 0)
1141             bld.vop2(aco_opcode::v_or_b32, dst, Operand::c32(val), def_op);
1142       }
1143    }
1144 }
1145 
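/* Copies a linear VGPR: its value must be valid in all lanes, so the copy is performed
 * twice, once with the current exec mask and once with it inverted (the two s_not's leave
 * exec unchanged afterwards). Since s_not clobbers SCC, it is saved to scratch_sgpr and
 * restored with s_cmp_lg_i32 when requested. */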
1146 void
1147 copy_linear_vgpr(Builder& bld, Definition def, Operand op, bool preserve_scc, PhysReg scratch_sgpr)
1148 {
1149    if (preserve_scc)
1150       bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1), Operand(scc, s1));
1151 
1152    for (unsigned i = 0; i < 2; i++) {
1153       if (def.size() == 2)
1154          bld.vop3(aco_opcode::v_lshrrev_b64, def, Operand::zero(), op);
1155       else
1156          bld.vop1(aco_opcode::v_mov_b32, def, op);
1157 
1158       bld.sop1(Builder::s_not, Definition(exec, bld.lm), Definition(scc, s1),
1159                Operand(exec, bld.lm));
1160    }
1161 
1162    if (preserve_scc)
1163       bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(scratch_sgpr, s1),
1164                Operand::zero());
1165 }
1166 
1167 void
1168 swap_linear_vgpr(Builder& bld, Definition def, Operand op, bool preserve_scc, PhysReg scratch_sgpr)
1169 {
1170    if (preserve_scc)
1171       bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1), Operand(scc, s1));
1172 
1173    Operand def_as_op = Operand(def.physReg(), def.regClass());
1174    Definition op_as_def = Definition(op.physReg(), op.regClass());
1175 
1176    for (unsigned i = 0; i < 2; i++) {
1177       if (bld.program->gfx_level >= GFX9) {
1178          bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
1179       } else {
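         /* no v_swap_b32 before GFX9: use the three-xor swap (a ^= b; b ^= a; a ^= b) */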
1180          bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1181          bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
1182          bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1183       }
1184 
1185       bld.sop1(Builder::s_not, Definition(exec, bld.lm), Definition(scc, s1),
1186                Operand(exec, bld.lm));
1187    }
1188 
1189    if (preserve_scc)
1190       bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(scratch_sgpr, s1),
1191                Operand::zero());
1192 }
1193 
1194 void
1195 addsub_subdword_gfx11(Builder& bld, Definition dst, Operand src0, Operand src1, bool sub)
1196 {
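   /* opsel bits 0 and 1 select the high half of src0/src1, bit 3 writes the high half of
    * the destination */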
1197    Instruction* instr =
1198       bld.vop3(sub ? aco_opcode::v_sub_u16_e64 : aco_opcode::v_add_u16_e64, dst, src0, src1).instr;
1199    if (src0.physReg().byte() == 2)
1200       instr->vop3().opsel |= 0x1;
1201    if (src1.physReg().byte() == 2)
1202       instr->vop3().opsel |= 0x2;
1203    if (dst.physReg().byte() == 2)
1204       instr->vop3().opsel |= 0x8;
1205 }
1206 
1207 bool
1208 do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool* preserve_scc,
1209         PhysReg scratch_sgpr)
1210 {
1211    bool did_copy = false;
1212    for (unsigned offset = 0; offset < copy.bytes;) {
1213       if (copy.uses[offset]) {
1214          offset++;
1215          continue;
1216       }
1217 
1218       Definition def;
1219       Operand op;
1220       split_copy(ctx, offset, &def, &op, copy, false, 8);
1221 
1222       if (def.physReg() == scc) {
1223          bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand::zero());
1224          *preserve_scc = true;
1225       } else if (op.isConstant()) {
1226          copy_constant(ctx, bld, def, op);
1227       } else if (def.regClass().is_linear_vgpr()) {
1228          copy_linear_vgpr(bld, def, op, *preserve_scc, scratch_sgpr);
1229       } else if (def.regClass() == v1) {
1230          bld.vop1(aco_opcode::v_mov_b32, def, op);
1231       } else if (def.regClass() == v2) {
1232          bld.vop3(aco_opcode::v_lshrrev_b64, def, Operand::zero(), op);
1233       } else if (def.regClass() == s1) {
1234          bld.sop1(aco_opcode::s_mov_b32, def, op);
1235       } else if (def.regClass() == s2) {
1236          bld.sop1(aco_opcode::s_mov_b64, def, op);
1237       } else if (def.regClass().is_subdword() && ctx->program->gfx_level < GFX8) {
1238          if (op.physReg().byte()) {
1239             assert(def.physReg().byte() == 0);
1240             bld.vop2(aco_opcode::v_lshrrev_b32, def, Operand::c32(op.physReg().byte() * 8), op);
1241          } else if (def.physReg().byte()) {
1242             assert(op.physReg().byte() == 0);
1243             /* preserve the target's lower half */
1244             uint32_t bits = def.physReg().byte() * 8;
1245             PhysReg lo_reg = PhysReg(def.physReg().reg());
1246             Definition lo_half =
1247                Definition(lo_reg, RegClass::get(RegType::vgpr, def.physReg().byte()));
1248             Definition dst =
1249                Definition(lo_reg, RegClass::get(RegType::vgpr, lo_half.bytes() + op.bytes()));
1250 
1251             if (def.physReg().reg() == op.physReg().reg()) {
1252                bld.vop2(aco_opcode::v_and_b32, lo_half, Operand::c32((1 << bits) - 1u),
1253                         Operand(lo_reg, lo_half.regClass()));
1254                if (def.physReg().byte() == 1) {
1255                   bld.vop2(aco_opcode::v_mul_u32_u24, dst, Operand::c32((1 << bits) + 1u), op);
1256                } else if (def.physReg().byte() == 2) {
1257                   bld.vop2(aco_opcode::v_cvt_pk_u16_u32, dst, Operand(lo_reg, v2b), op);
1258                } else if (def.physReg().byte() == 3) {
1259                   bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1),
1260                            Operand::c32((1 << bits) + 1u));
1261                   bld.vop3(aco_opcode::v_mul_lo_u32, dst, Operand(scratch_sgpr, s1), op);
1262                }
1263             } else {
1264                lo_half.setFixed(lo_half.physReg().advance(4 - def.physReg().byte()));
1265                bld.vop2(aco_opcode::v_lshlrev_b32, lo_half, Operand::c32(32 - bits),
1266                         Operand(lo_reg, lo_half.regClass()));
1267                bld.vop3(aco_opcode::v_alignbyte_b32, dst, op,
1268                         Operand(lo_half.physReg(), lo_half.regClass()),
1269                         Operand::c32(4 - def.physReg().byte()));
1270             }
1271          } else {
1272             bld.vop1(aco_opcode::v_mov_b32, def, op);
1273          }
1274       } else if (def.regClass() == v1b && ctx->program->gfx_level >= GFX11) {
1275          uint8_t swiz[] = {4, 5, 6, 7};
1276          swiz[def.physReg().byte()] = op.physReg().byte();
1277          create_bperm(bld, swiz, def, op);
1278       } else if (def.regClass() == v2b && ctx->program->gfx_level >= GFX11) {
1279          addsub_subdword_gfx11(bld, def, op, Operand::zero(), false);
1280       } else if (def.regClass().is_subdword()) {
1281          bld.vop1_sdwa(aco_opcode::v_mov_b32, def, op);
1282       } else {
1283          unreachable("unsupported copy");
1284       }
1285 
1286       did_copy = true;
1287       offset += def.bytes();
1288    }
1289    return did_copy;
1290 }
1291 
1292 void
1293 swap_subdword_gfx11(Builder& bld, Definition def, Operand op)
1294 {
1295    if (def.physReg().reg() == op.physReg().reg()) {
1296       assert(def.bytes() != 2); /* handled by caller */
1297       uint8_t swiz[] = {4, 5, 6, 7};
1298       std::swap(swiz[def.physReg().byte()], swiz[op.physReg().byte()]);
1299       create_bperm(bld, swiz, def, Operand::zero());
1300       return;
1301    }
1302 
1303    if (def.bytes() == 2) {
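      /* swap the two 16-bit values using add/sub: def += op; op = def - op; def -= op */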
1304       Operand def_as_op = Operand(def.physReg(), def.regClass());
1305       Definition op_as_def = Definition(op.physReg(), op.regClass());
1306       addsub_subdword_gfx11(bld, def, def_as_op, op, false);
1307       addsub_subdword_gfx11(bld, op_as_def, def_as_op, op, true);
1308       addsub_subdword_gfx11(bld, def, def_as_op, op, true);
1309    } else {
1310       PhysReg op_half = op.physReg();
1311       op_half.reg_b &= ~1;
1312 
1313       PhysReg def_other_half = def.physReg();
1314       def_other_half.reg_b &= ~1;
1315       def_other_half.reg_b ^= 2;
1316 
1317       /* We can only swap individual bytes within a single VGPR, so temporarily move both bytes
1318        * into the same VGPR.
1319        */
1320       swap_subdword_gfx11(bld, Definition(def_other_half, v2b), Operand(op_half, v2b));
1321       swap_subdword_gfx11(bld, def, Operand(def_other_half.advance(op.physReg().byte() & 1), v1b));
1322       swap_subdword_gfx11(bld, Definition(def_other_half, v2b), Operand(op_half, v2b));
1323    }
1324 }
1325 
1326 void
1327 do_swap(lower_context* ctx, Builder& bld, const copy_operation& copy, bool preserve_scc,
1328         Pseudo_instruction* pi)
1329 {
1330    unsigned offset = 0;
1331 
1332    if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
1333        (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
1334       /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte
1335        * swap */
1336       PhysReg op = copy.op.physReg();
1337       PhysReg def = copy.def.physReg();
1338       op.reg_b &= ~0x3;
1339       def.reg_b &= ~0x3;
1340 
1341       copy_operation tmp;
1342       tmp.op = Operand(op, v1);
1343       tmp.def = Definition(def, v1);
1344       tmp.bytes = 4;
1345       memset(tmp.uses, 1, 4);
1346       do_swap(ctx, bld, tmp, preserve_scc, pi);
1347 
1348       op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1349       def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1350       tmp.op = Operand(op, v1b);
1351       tmp.def = Definition(def, v1b);
1352       tmp.bytes = 1;
1353       tmp.uses[0] = 1;
1354       do_swap(ctx, bld, tmp, preserve_scc, pi);
1355 
1356       offset = copy.bytes;
1357    }
1358 
1359    for (; offset < copy.bytes;) {
1360       Definition def;
1361       Operand op;
1362       unsigned max_size = copy.def.regClass().type() == RegType::vgpr ? 4 : 8;
1363       split_copy(ctx, offset, &def, &op, copy, true, max_size);
1364 
1365       assert(op.regClass() == def.regClass());
1366       Operand def_as_op = Operand(def.physReg(), def.regClass());
1367       Definition op_as_def = Definition(op.physReg(), op.regClass());
1368       if (def.regClass().is_linear_vgpr()) {
1369          swap_linear_vgpr(bld, def, op, preserve_scc, pi->scratch_sgpr);
1370       } else if (ctx->program->gfx_level >= GFX9 && def.regClass() == v1) {
1371          bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
1372       } else if (def.regClass() == v1) {
1373          assert(def.physReg().byte() == 0 && op.physReg().byte() == 0);
1374          bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1375          bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
1376          bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1377       } else if (op.physReg() == scc || def.physReg() == scc) {
1378          /* we need to swap scc and another sgpr */
1379          assert(!preserve_scc);
1380 
1381          PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();
1382 
1383          bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1384          bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1),
1385                   Operand::zero());
1386          bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
1387       } else if (def.regClass() == s1) {
1388          if (preserve_scc) {
1389             bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
1390             bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
1391             bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
1392          } else {
1393             bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1394             bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
1395             bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1396          }
1397       } else if (def.regClass() == s2) {
1398          if (preserve_scc)
1399             bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1400          bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1401          bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
1402          bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1403          if (preserve_scc)
1404             bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1),
1405                      Operand::zero());
1406       } else if (def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
1407          bld.vop3(aco_opcode::v_alignbyte_b32, Definition(def.physReg(), v1), def_as_op, op,
1408                   Operand::c32(2u));
1409       } else {
1410          assert(def.regClass().is_subdword());
1411          if (ctx->program->gfx_level >= GFX11) {
1412             swap_subdword_gfx11(bld, def, op);
1413          } else {
1414             bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1415             bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
1416             bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1417          }
1418       }
1419 
1420       offset += def.bytes();
1421    }
1422 
1423    if (ctx->program->gfx_level <= GFX7)
1424       return;
1425 
1426    /* fixup in case we swapped bytes we shouldn't have */
1427    copy_operation tmp_copy = copy;
1428    tmp_copy.op.setFixed(copy.def.physReg());
1429    tmp_copy.def.setFixed(copy.op.physReg());
1430    do_copy(ctx, bld, tmp_copy, &preserve_scc, pi->scratch_sgpr);
1431 }
1432 
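/* Packs two 16-bit sources (registers or constants) into the low and high halves of a full
 * VGPR, picking between v_pack_b32_f16, v_alignbyte_b32 and shift/or sequences depending on
 * the GFX level and on where the sources currently live. */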
1433 void
1434 do_pack_2x16(lower_context* ctx, Builder& bld, Definition def, Operand lo, Operand hi)
1435 {
1436    if (lo.isConstant() && hi.isConstant()) {
1437       copy_constant(ctx, bld, def, Operand::c32(lo.constantValue() | (hi.constantValue() << 16)));
1438       return;
1439    }
1440 
1441    bool can_use_pack = (ctx->block->fp_mode.denorm16_64 & fp_denorm_keep_in) &&
1442                        (ctx->program->gfx_level >= GFX10 ||
1443                         (ctx->program->gfx_level >= GFX9 && !lo.isLiteral() && !hi.isLiteral()));
1444 
1445    if (can_use_pack) {
1446       Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, def, lo, hi);
1447       /* opsel: 0 = select low half, 1 = select high half. [0] = src0, [1] = src1 */
1448       instr->vop3().opsel = hi.physReg().byte() | (lo.physReg().byte() >> 1);
1449       return;
1450    }
1451 
1452    /* a single alignbyte can be sufficient: hi can be a 32-bit integer constant */
1453    if (lo.physReg().byte() == 2 && hi.physReg().byte() == 0 &&
1454        (!hi.isConstant() || !Operand::c32(hi.constantValue()).isLiteral() ||
1455         ctx->program->gfx_level >= GFX10)) {
1456       if (hi.isConstant())
1457          bld.vop3(aco_opcode::v_alignbyte_b32, def, Operand::c32(hi.constantValue()), lo,
1458                   Operand::c32(2u));
1459       else
1460          bld.vop3(aco_opcode::v_alignbyte_b32, def, hi, lo, Operand::c32(2u));
1461       return;
1462    }
1463 
1464    Definition def_lo = Definition(def.physReg(), v2b);
1465    Definition def_hi = Definition(def.physReg().advance(2), v2b);
1466 
1467    if (lo.isConstant()) {
1468       /* move hi and zero low bits */
1469       if (hi.physReg().byte() == 0)
1470          bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), hi);
1471       else
1472          bld.vop2(aco_opcode::v_and_b32, def_hi, Operand::c32(~0xFFFFu), hi);
1473       bld.vop2(aco_opcode::v_or_b32, def, Operand::c32(lo.constantValue()),
1474                Operand(def.physReg(), v1));
1475       return;
1476    }
1477    if (hi.isConstant()) {
1478       /* move lo and zero high bits */
1479       if (lo.physReg().byte() == 2)
1480          bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), lo);
1481       else
1482          bld.vop2(aco_opcode::v_and_b32, def_lo, Operand::c32(0xFFFFu), lo);
1483       bld.vop2(aco_opcode::v_or_b32, def, Operand::c32(hi.constantValue() << 16u),
1484                Operand(def.physReg(), v1));
1485       return;
1486    }
1487 
1488    if (lo.physReg().reg() == def.physReg().reg()) {
1489       /* lo is in the high bits of def */
1490       assert(lo.physReg().byte() == 2);
1491       bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), lo);
1492       lo.setFixed(def.physReg());
1493    } else if (hi.physReg() == def.physReg()) {
1494       /* hi is in the low bits of def */
1495       assert(hi.physReg().byte() == 0);
1496       bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), hi);
1497       hi.setFixed(def.physReg().advance(2));
1498    } else if (ctx->program->gfx_level >= GFX8) {
1499       /* Either lo or hi can be placed with just a v_mov. SDWA is not needed, because
1500        * op.physReg().byte()==def.physReg().byte() and the other half will be overwritten.
1501        */
1502       assert(lo.physReg().byte() == 0 || hi.physReg().byte() == 2);
1503       Operand& op = lo.physReg().byte() == 0 ? lo : hi;
1504       PhysReg reg = def.physReg().advance(op.physReg().byte());
1505       bld.vop1(aco_opcode::v_mov_b32, Definition(reg, v2b), op);
1506       op.setFixed(reg);
1507    }
1508 
1509    /* either hi or lo are already placed correctly */
1510    if (ctx->program->gfx_level >= GFX11) {
1511       if (lo.physReg().reg() == def.physReg().reg())
1512          addsub_subdword_gfx11(bld, def_hi, hi, Operand::zero(), false);
1513       else
1514          addsub_subdword_gfx11(bld, def_lo, lo, Operand::zero(), false);
1515       return;
1516    } else if (ctx->program->gfx_level >= GFX8) {
1517       if (lo.physReg().reg() == def.physReg().reg())
1518          bld.vop1_sdwa(aco_opcode::v_mov_b32, def_hi, hi);
1519       else
1520          bld.vop1_sdwa(aco_opcode::v_mov_b32, def_lo, lo);
1521       return;
1522    }
1523 
1524    /* alignbyte needs the operands in the following way:
1525     * | xx hi | lo xx | >> 2 byte */
1526    if (lo.physReg().byte() != hi.physReg().byte()) {
1527       /* | xx lo | hi xx | => | lo hi | lo hi | */
1528       assert(lo.physReg().byte() == 0 && hi.physReg().byte() == 2);
1529       bld.vop3(aco_opcode::v_alignbyte_b32, def, lo, hi, Operand::c32(2u));
1530       lo = Operand(def_hi.physReg(), v2b);
1531       hi = Operand(def_lo.physReg(), v2b);
1532    } else if (lo.physReg().byte() == 0) {
1533       /* | xx hi | xx lo | => | xx hi | lo 00 | */
1534       bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), lo);
1535       lo = Operand(def_hi.physReg(), v2b);
1536    } else {
1537       /* | hi xx | lo xx | => | 00 hi | lo xx | */
1538       assert(hi.physReg().byte() == 2);
1539       bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), hi);
1540       hi = Operand(def_lo.physReg(), v2b);
1541    }
1542    /* perform the alignbyte */
1543    bld.vop3(aco_opcode::v_alignbyte_b32, def, hi, lo, Operand::c32(2u));
1544 }
1545 
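/* Tries to merge a copy with the copy targeting the immediately following bytes, so that
 * both can be done as a single larger, suitably aligned copy. */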
1546 void
1547 try_coalesce_copies(lower_context* ctx, std::map<PhysReg, copy_operation>& copy_map,
1548                     copy_operation& copy)
1549 {
1550    // TODO try more relaxed alignment for subdword copies
1551    unsigned next_def_align = util_next_power_of_two(copy.bytes + 1);
1552    unsigned next_op_align = next_def_align;
1553    if (copy.def.regClass().type() == RegType::vgpr)
1554       next_def_align = MIN2(next_def_align, 4);
1555    if (copy.op.regClass().type() == RegType::vgpr)
1556       next_op_align = MIN2(next_op_align, 4);
1557 
1558    if (copy.bytes >= 8 || copy.def.physReg().reg_b % next_def_align ||
1559        (!copy.op.isConstant() && copy.op.physReg().reg_b % next_op_align))
1560       return;
1561 
1562    auto other = copy_map.find(copy.def.physReg().advance(copy.bytes));
1563    if (other == copy_map.end() || copy.bytes + other->second.bytes > 8 ||
1564        copy.op.isConstant() != other->second.op.isConstant())
1565       return;
1566 
1567    /* don't create 64-bit copies before GFX10 */
1568    if (copy.bytes >= 4 && copy.def.regClass().type() == RegType::vgpr &&
1569        ctx->program->gfx_level < GFX10)
1570       return;
1571 
1572    unsigned new_size = copy.bytes + other->second.bytes;
1573    if (copy.op.isConstant()) {
1574       uint64_t val =
1575          copy.op.constantValue64() | (other->second.op.constantValue64() << (copy.bytes * 8u));
1576       if (!util_is_power_of_two_or_zero(new_size))
1577          return;
1578       if (!Operand::is_constant_representable(val, new_size, true,
1579                                               copy.def.regClass().type() == RegType::vgpr))
1580          return;
1581       copy.op = Operand::get_const(ctx->program->gfx_level, val, new_size);
1582    } else {
1583       if (other->second.op.physReg() != copy.op.physReg().advance(copy.bytes))
1584          return;
1585       copy.op = Operand(copy.op.physReg(), copy.op.regClass().resize(new_size));
1586    }
1587 
1588    copy.bytes = new_size;
1589    copy.def = Definition(copy.def.physReg(), copy.def.regClass().resize(copy.bytes));
1590    copy_map.erase(other);
1591 }
1592 
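/* Lowers a set of parallel copies: first counts how often each destination is still needed
 * as a source, then emits all copies that form acyclic paths in the location-transfer graph,
 * and finally breaks the remaining cycles with swaps plus fix-up copies. */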
1593 void
1594 handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx,
1595                 amd_gfx_level gfx_level, Pseudo_instruction* pi)
1596 {
1597    Builder bld(ctx->program, &ctx->instructions);
1598    unsigned num_instructions_before = ctx->instructions.size();
1599    aco_ptr<Instruction> mov;
1600    bool writes_scc = false;
1601 
1602    /* count the number of uses for each dst reg */
1603    for (auto it = copy_map.begin(); it != copy_map.end();) {
1604 
1605       if (it->second.def.physReg() == scc)
1606          writes_scc = true;
1607 
1608       assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
1609 
1610       /* if src and dst reg are the same, remove operation */
1611       if (it->first == it->second.op.physReg()) {
1612          it = copy_map.erase(it);
1613          continue;
1614       }
1615 
1616       /* split large copies */
1617       if (it->second.bytes > 8) {
1618          assert(!it->second.op.isConstant());
1619          assert(!it->second.def.regClass().is_subdword());
1620          RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
1621          Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
1622          rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
1623          Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
1624          copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
1625          copy_map[hi_def.physReg()] = copy;
1626          assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
1627          it->second.op = Operand(it->second.op.physReg(),
1628                                  it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
1629          it->second.def = Definition(it->second.def.physReg(),
1630                                      it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
1631          it->second.bytes = 8;
1632       }
1633 
1634       try_coalesce_copies(ctx, copy_map, it->second);
1635 
1636       /* check if the definition reg is used by another copy operation */
1637       for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1638          if (copy.second.op.isConstant())
1639             continue;
1640          for (uint16_t i = 0; i < it->second.bytes; i++) {
1641             /* distance might underflow */
1642             unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
1643             if (distance < copy.second.bytes)
1644                it->second.uses[i] += 1;
1645          }
1646       }
1647 
1648       ++it;
1649    }
1650 
1651    /* first, handle paths in the location transfer graph */
1652    bool preserve_scc = pi->tmp_in_scc && !writes_scc;
1653    bool skip_partial_copies = true;
1654    for (auto it = copy_map.begin();;) {
1655       if (copy_map.empty()) {
1656          ctx->program->statistics[statistic_copies] +=
1657             ctx->instructions.size() - num_instructions_before;
1658          return;
1659       }
1660       if (it == copy_map.end()) {
1661          if (!skip_partial_copies)
1662             break;
1663          skip_partial_copies = false;
1664          it = copy_map.begin();
1665       }
1666 
1667       /* check if we can pack one register at once */
1668       if (it->first.byte() == 0 && it->second.bytes == 2) {
1669          PhysReg reg_hi = it->first.advance(2);
1670          std::map<PhysReg, copy_operation>::iterator other = copy_map.find(reg_hi);
1671          if (other != copy_map.end() && other->second.bytes == 2) {
1672             /* check if the target register is otherwise unused */
1673             bool unused_lo = !it->second.is_used || (it->second.is_used == 0x0101 &&
1674                                                      other->second.op.physReg() == it->first);
1675             bool unused_hi = !other->second.is_used ||
1676                              (other->second.is_used == 0x0101 && it->second.op.physReg() == reg_hi);
1677             if (unused_lo && unused_hi) {
1678                Operand lo = it->second.op;
1679                Operand hi = other->second.op;
1680                do_pack_2x16(ctx, bld, Definition(it->first, v1), lo, hi);
1681                copy_map.erase(it);
1682                copy_map.erase(other);
1683 
1684                for (std::pair<const PhysReg, copy_operation>& other2 : copy_map) {
1685                   for (uint16_t i = 0; i < other2.second.bytes; i++) {
1686                      /* distance might underflow */
1687                      unsigned distance_lo = other2.first.reg_b + i - lo.physReg().reg_b;
1688                      unsigned distance_hi = other2.first.reg_b + i - hi.physReg().reg_b;
1689                      if (distance_lo < 2 || distance_hi < 2)
1690                         other2.second.uses[i] -= 1;
1691                   }
1692                }
1693                it = copy_map.begin();
1694                continue;
1695             }
1696          }
1697       }
1698 
1699       /* on GFX6/7, we need some small workarounds as there is no
1700        * SDWA instruction to do partial register writes */
1701       if (ctx->program->gfx_level < GFX8 && it->second.bytes < 4) {
1702          if (it->first.byte() == 0 && it->second.op.physReg().byte() == 0 && !it->second.is_used &&
1703              pi->opcode == aco_opcode::p_split_vector) {
1704             /* Other operations might overwrite the high bits, so change all users
1705              * of the high bits to the new target where they are still available.
1706              * This mechanism depends on also emitting dead definitions. */
1707             PhysReg reg_hi = it->second.op.physReg().advance(it->second.bytes);
1708             while (reg_hi != PhysReg(it->second.op.physReg().reg() + 1)) {
1709                std::map<PhysReg, copy_operation>::iterator other = copy_map.begin();
1710                for (other = copy_map.begin(); other != copy_map.end(); other++) {
1711                   /* on GFX6/7, if the high bits are used as operand, they cannot be a target */
1712                   if (other->second.op.physReg() == reg_hi) {
1713                      other->second.op.setFixed(it->first.advance(reg_hi.byte()));
1714                      break; /* break because an operand can only be used once */
1715                   }
1716                }
1717                reg_hi = reg_hi.advance(it->second.bytes);
1718             }
1719          } else if (it->first.byte()) {
1720             assert(pi->opcode == aco_opcode::p_create_vector);
1721             /* on GFX6/7, if we target an upper half where the lower half hasn't yet been handled,
1722              * move to the target operand's high bits. This is safe to do as it cannot be an operand
1723              */
1724             PhysReg lo = PhysReg(it->first.reg());
1725             std::map<PhysReg, copy_operation>::iterator other = copy_map.find(lo);
1726             if (other != copy_map.end()) {
1727                assert(other->second.bytes == it->first.byte());
1728                PhysReg new_reg_hi = other->second.op.physReg().advance(it->first.byte());
1729                it->second.def = Definition(new_reg_hi, it->second.def.regClass());
1730                it->second.is_used = 0;
1731                other->second.bytes += it->second.bytes;
1732                other->second.def.setTemp(Temp(other->second.def.tempId(),
1733                                               RegClass::get(RegType::vgpr, other->second.bytes)));
1734                other->second.op.setTemp(Temp(other->second.op.tempId(),
1735                                              RegClass::get(RegType::vgpr, other->second.bytes)));
1736                /* if the new target's high bits are also a target, change uses */
1737                std::map<PhysReg, copy_operation>::iterator target = copy_map.find(new_reg_hi);
1738                if (target != copy_map.end()) {
1739                   for (unsigned i = 0; i < it->second.bytes; i++)
1740                      target->second.uses[i]++;
1741                }
1742             }
1743          }
1744       }
1745 
1746       /* find portions where the target reg is not used as operand for any other copy */
1747       if (it->second.is_used) {
1748          if (it->second.op.isConstant() || skip_partial_copies) {
1749             /* we have to skip constants until is_used=0.
1750              * we also skip partial copies at the beginning to help coalescing */
1751             ++it;
1752             continue;
1753          }
1754 
1755          unsigned has_zero_use_bytes = 0;
1756          for (unsigned i = 0; i < it->second.bytes; i++)
1757             has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
1758 
1759          if (has_zero_use_bytes) {
1760             /* Skipping partial copying and doing a v_swap_b32 and then fixup
1761              * copies is usually beneficial for sub-dword copies, but if doing
1762              * a partial copy allows further copies, it should be done instead. */
1763             bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
1764             for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1765                /* on GFX6/7, we can only do copies with full registers */
1766                if (partial_copy || ctx->program->gfx_level <= GFX7)
1767                   break;
1768                for (uint16_t i = 0; i < copy.second.bytes; i++) {
1769                   /* distance might underflow */
1770                   unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
1771                   if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
1772                       !it->second.uses[distance])
1773                      partial_copy = true;
1774                }
1775             }
1776 
1777             if (!partial_copy) {
1778                ++it;
1779                continue;
1780             }
1781          } else {
1782             /* full target reg is used: register swapping needed */
1783             ++it;
1784             continue;
1785          }
1786       }
1787 
1788       bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc, pi->scratch_sgpr);
1789       skip_partial_copies = did_copy;
1790       std::pair<PhysReg, copy_operation> copy = *it;
1791 
1792       if (it->second.is_used == 0) {
1793          /* the target reg is not used as operand for any other copy, so we
1794           * copied to all of it */
1795          copy_map.erase(it);
1796          it = copy_map.begin();
1797       } else {
1798          /* we only performed some portions of this copy, so split it to only
1799           * leave the portions that still need to be done */
1800          copy_operation original = it->second; /* the map insertion below can overwrite this */
1801          copy_map.erase(it);
1802          for (unsigned offset = 0; offset < original.bytes;) {
1803             if (original.uses[offset] == 0) {
1804                offset++;
1805                continue;
1806             }
1807             Definition def;
1808             Operand op;
1809             split_copy(ctx, offset, &def, &op, original, false, 8);
1810 
1811             copy_operation new_copy = {op, def, def.bytes()};
1812             for (unsigned i = 0; i < new_copy.bytes; i++)
1813                new_copy.uses[i] = original.uses[i + offset];
1814             copy_map[def.physReg()] = new_copy;
1815 
1816             offset += def.bytes();
1817          }
1818 
1819          it = copy_map.begin();
1820       }
1821 
1822       /* Reduce the number of uses of the operand reg by one. Do this after
1823        * splitting the copy or removing it in case the copy writes to its own
1824        * operand (for example, v[7:8] = v[8:9]) */
1825       if (did_copy && !copy.second.op.isConstant()) {
1826          for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
1827             for (uint16_t i = 0; i < other.second.bytes; i++) {
1828                /* distance might underflow */
1829                unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
1830                if (distance < copy.second.bytes && !copy.second.uses[distance])
1831                   other.second.uses[i] -= 1;
1832             }
1833          }
1834       }
1835    }
1836 
1837    /* all target regs are needed as an operand somewhere, which means all entries are part of a cycle */
1838    unsigned largest = 0;
1839    for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
1840       largest = MAX2(largest, op.second.bytes);
1841 
1842    while (!copy_map.empty()) {
1843 
1844       /* Perform larger swaps first, because larger swaps can make other
1845        * swaps unnecessary. */
1846       auto it = copy_map.begin();
1847       for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
1848          if (it2->second.bytes > it->second.bytes) {
1849             it = it2;
1850             if (it->second.bytes == largest)
1851                break;
1852          }
1853       }
1854 
1855       /* should already be done */
1856       assert(!it->second.op.isConstant());
1857 
1858       assert(it->second.op.isFixed());
1859       assert(it->second.def.regClass() == it->second.op.regClass());
1860 
1861       if (it->first == it->second.op.physReg()) {
1862          copy_map.erase(it);
1863          continue;
1864       }
1865 
1866       if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
1867          assert(!(it->second.def.physReg() == pi->scratch_sgpr));
1868 
1869       /* to resolve the cycle, we have to swap the src reg with the dst reg */
1870       copy_operation swap = it->second;
1871 
1872       /* if this is self-intersecting, we have to split it because
1873        * self-intersecting swaps don't make sense */
1874       PhysReg src = swap.op.physReg(), dst = swap.def.physReg();
1875       if (abs((int)src.reg_b - (int)dst.reg_b) < (int)swap.bytes) {
1876          unsigned offset = abs((int)src.reg_b - (int)dst.reg_b);
1877 
1878          copy_operation remaining;
1879          src.reg_b += offset;
1880          dst.reg_b += offset;
1881          remaining.bytes = swap.bytes - offset;
1882          memcpy(remaining.uses, swap.uses + offset, remaining.bytes);
1883          remaining.op = Operand(src, swap.def.regClass().resize(remaining.bytes));
1884          remaining.def = Definition(dst, swap.def.regClass().resize(remaining.bytes));
1885          copy_map[dst] = remaining;
1886 
1887          memset(swap.uses + offset, 0, swap.bytes - offset);
1888          swap.bytes = offset;
1889       }
1890 
1891       /* GFX6-7 can only swap full registers */
1892       if (ctx->program->gfx_level <= GFX7)
1893          swap.bytes = align(swap.bytes, 4);
1894 
1895       do_swap(ctx, bld, swap, preserve_scc, pi);
1896 
1897       /* remove from map */
1898       copy_map.erase(it);
1899 
1900       /* change the operand reg of the target's uses and split uses if needed */
1901       uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
1902       for (auto target = copy_map.begin(); target != copy_map.end(); ++target) {
1903          if (target->second.op.physReg() == swap.def.physReg() &&
1904              swap.bytes == target->second.bytes) {
1905             target->second.op.setFixed(swap.op.physReg());
1906             break;
1907          }
1908 
1909          uint32_t imask =
1910             get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
1911                                   target->second.op.physReg().reg_b, target->second.bytes);
1912 
1913          if (!imask)
1914             continue;
1915 
1916          int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;
1917 
1918          /* split and update the middle (the portion that reads the swap's
1919           * definition) to read the swap's operand instead */
1920          int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
1921          int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
1922          int before_bytes = MAX2(-offset, 0);
1923          int after_bytes = MAX2(target_op_end - swap_def_end, 0);
1924          int middle_bytes = target->second.bytes - before_bytes - after_bytes;
1925 
1926          if (after_bytes) {
1927             unsigned after_offset = before_bytes + middle_bytes;
1928             assert(after_offset > 0);
1929             copy_operation copy;
1930             copy.bytes = after_bytes;
1931             memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
1932             RegClass rc = target->second.op.regClass().resize(after_bytes);
1933             copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
1934             copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
1935             copy_map[copy.def.physReg()] = copy;
1936          }
1937 
1938          if (middle_bytes) {
1939             copy_operation copy;
1940             copy.bytes = middle_bytes;
1941             memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
1942             RegClass rc = target->second.op.regClass().resize(middle_bytes);
1943             copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
1944             copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
1945             copy_map[copy.def.physReg()] = copy;
1946          }
1947 
1948          if (before_bytes) {
1949             copy_operation copy;
1950             target->second.bytes = before_bytes;
1951             RegClass rc = target->second.op.regClass().resize(before_bytes);
1952             target->second.op = Operand(target->second.op.physReg(), rc);
1953             target->second.def = Definition(target->second.def.physReg(), rc);
1954             memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
1955          }
1956 
1957          /* break early since we know each byte of the swap's definition is used
1958           * at most once */
1959          bytes_left &= ~imask;
1960          if (!bytes_left)
1961             break;
1962       }
1963    }
1964    ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before;
1965 }
1966 
1967 void
1968 emit_set_mode(Builder& bld, float_mode new_mode, bool set_round, bool set_denorm)
1969 {
1970    if (bld.program->gfx_level >= GFX10) {
1971       if (set_round)
1972          bld.sopp(aco_opcode::s_round_mode, -1, new_mode.round);
1973       if (set_denorm)
1974          bld.sopp(aco_opcode::s_denorm_mode, -1, new_mode.denorm);
1975    } else if (set_round || set_denorm) {
1976       /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
1977       bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand::literal32(new_mode.val), (7 << 11) | 1);
1978    }
1979 }
1980 
1981 void
1982 emit_set_mode_from_block(Builder& bld, Program& program, Block* block, bool always_set)
1983 {
1984    float_mode config_mode;
1985    config_mode.val = program.config->float_mode;
1986 
1987    bool set_round = always_set && block->fp_mode.round != config_mode.round;
1988    bool set_denorm = always_set && block->fp_mode.denorm != config_mode.denorm;
1989    if (block->kind & block_kind_top_level) {
1990       for (unsigned pred : block->linear_preds) {
1991          if (program.blocks[pred].fp_mode.round != block->fp_mode.round)
1992             set_round = true;
1993          if (program.blocks[pred].fp_mode.denorm != block->fp_mode.denorm)
1994             set_denorm = true;
1995       }
1996    }
1997    /* only allow changing modes at top-level blocks so this doesn't break
1998     * the "jump over empty blocks" optimization */
1999    assert((!set_round && !set_denorm) || (block->kind & block_kind_top_level));
2000    emit_set_mode(bld, block->fp_mode, set_round, set_denorm);
2001 }
2002 
2003 void
2004 lower_to_hw_instr(Program* program)
2005 {
2006    Block* discard_block = NULL;
2007 
2008    for (int block_idx = program->blocks.size() - 1; block_idx >= 0; block_idx--) {
2009       Block* block = &program->blocks[block_idx];
2010       lower_context ctx;
2011       ctx.program = program;
2012       ctx.block = block;
2013       Builder bld(program, &ctx.instructions);
2014 
2015       emit_set_mode_from_block(bld, *program, block, (block_idx == 0));
2016 
2017       for (size_t instr_idx = 0; instr_idx < block->instructions.size(); instr_idx++) {
2018          aco_ptr<Instruction>& instr = block->instructions[instr_idx];
2019          aco_ptr<Instruction> mov;
2020          if (instr->isPseudo() && instr->opcode != aco_opcode::p_unit_test) {
2021             Pseudo_instruction* pi = &instr->pseudo();
2022 
2023             switch (instr->opcode) {
2024             case aco_opcode::p_extract_vector: {
2025                PhysReg reg = instr->operands[0].physReg();
2026                Definition& def = instr->definitions[0];
2027                reg.reg_b += instr->operands[1].constantValue() * def.bytes();
2028 
2029                if (reg == def.physReg())
2030                   break;
2031 
2032                RegClass op_rc = def.regClass().is_subdword()
2033                                    ? def.regClass()
2034                                    : RegClass(instr->operands[0].getTemp().type(), def.size());
2035                std::map<PhysReg, copy_operation> copy_operations;
2036                copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
2037                handle_operands(copy_operations, &ctx, program->gfx_level, pi);
2038                break;
2039             }
2040             case aco_opcode::p_create_vector: {
2041                std::map<PhysReg, copy_operation> copy_operations;
2042                PhysReg reg = instr->definitions[0].physReg();
2043 
2044                for (const Operand& op : instr->operands) {
2045                   if (op.isConstant()) {
2046                      const Definition def = Definition(
2047                         reg, instr->definitions[0].getTemp().regClass().resize(op.bytes()));
2048                      copy_operations[reg] = {op, def, op.bytes()};
2049                      reg.reg_b += op.bytes();
2050                      continue;
2051                   }
2052                   if (op.isUndefined()) {
2053                      // TODO: coalesce subdword copies if dst byte is 0
2054                      reg.reg_b += op.bytes();
2055                      continue;
2056                   }
2057 
2058                   RegClass rc_def =
2059                      op.regClass().is_subdword()
2060                         ? op.regClass()
2061                         : instr->definitions[0].getTemp().regClass().resize(op.bytes());
2062                   const Definition def = Definition(reg, rc_def);
2063                   copy_operations[def.physReg()] = {op, def, op.bytes()};
2064                   reg.reg_b += op.bytes();
2065                }
2066                handle_operands(copy_operations, &ctx, program->gfx_level, pi);
2067                break;
2068             }
2069             case aco_opcode::p_split_vector: {
2070                std::map<PhysReg, copy_operation> copy_operations;
2071                PhysReg reg = instr->operands[0].physReg();
2072 
2073                for (const Definition& def : instr->definitions) {
2074                   RegClass rc_op = def.regClass().is_subdword()
2075                                       ? def.regClass()
2076                                       : instr->operands[0].getTemp().regClass().resize(def.bytes());
2077                   const Operand op = Operand(reg, rc_op);
2078                   copy_operations[def.physReg()] = {op, def, def.bytes()};
2079                   reg.reg_b += def.bytes();
2080                }
2081                handle_operands(copy_operations, &ctx, program->gfx_level, pi);
2082                break;
2083             }
2084             case aco_opcode::p_parallelcopy:
2085             case aco_opcode::p_wqm: {
2086                std::map<PhysReg, copy_operation> copy_operations;
2087                for (unsigned j = 0; j < instr->operands.size(); j++) {
2088                   assert(instr->definitions[j].bytes() == instr->operands[j].bytes());
2089                   copy_operations[instr->definitions[j].physReg()] = {
2090                      instr->operands[j], instr->definitions[j], instr->operands[j].bytes()};
2091                }
2092                handle_operands(copy_operations, &ctx, program->gfx_level, pi);
2093                break;
2094             }
2095             case aco_opcode::p_exit_early_if: {
2096                /* don't bother with an early exit near the end of the program */
2097                if ((block->instructions.size() - 1 - instr_idx) <= 4 &&
2098                    block->instructions.back()->opcode == aco_opcode::s_endpgm) {
2099                   unsigned null_exp_dest =
2100                      (ctx.program->stage.hw == HWStage::FS) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
2101                   bool ignore_early_exit = true;
2102 
2103                   for (unsigned k = instr_idx + 1; k < block->instructions.size(); ++k) {
2104                      const aco_ptr<Instruction>& instr2 = block->instructions[k];
2105                      if (instr2->opcode == aco_opcode::s_endpgm ||
2106                          instr2->opcode == aco_opcode::p_logical_end)
2107                         continue;
2108                      else if (instr2->opcode == aco_opcode::exp &&
2109                               instr2->exp().dest == null_exp_dest)
2110                         continue;
2111                      else if (instr2->opcode == aco_opcode::p_parallelcopy &&
2112                               instr2->definitions[0].isFixed() &&
2113                               instr2->definitions[0].physReg() == exec)
2114                         continue;
2115 
2116                      ignore_early_exit = false;
2117                   }
2118 
2119                   if (ignore_early_exit)
2120                      break;
2121                }
2122 
2123                if (!discard_block) {
2124                   discard_block = program->create_and_insert_block();
2125                   block = &program->blocks[block_idx];
2126 
2127                   bld.reset(discard_block);
2128                   bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1), 0,
2129                           program->gfx_level >= GFX11 ? V_008DFC_SQ_EXP_MRT : V_008DFC_SQ_EXP_NULL,
2130                           false, true, true);
2131                   bld.sopp(aco_opcode::s_endpgm);
2132 
2133                   bld.reset(&ctx.instructions);
2134                }
2135 
2136                assert(instr->operands[0].physReg() == scc);
2137                bld.sopp(aco_opcode::s_cbranch_scc0, Definition(exec, s2), instr->operands[0],
2138                         discard_block->index);
2139 
2140                discard_block->linear_preds.push_back(block->index);
2141                block->linear_succs.push_back(discard_block->index);
2142                break;
2143             }
2144             case aco_opcode::p_spill: {
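               /* spill: write each dword of the SGPR (or constant) into one lane of the
                * linear VGPR with v_writelane */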
2145                assert(instr->operands[0].regClass() == v1.as_linear());
2146                for (unsigned i = 0; i < instr->operands[2].size(); i++) {
2147                   Operand src =
2148                      instr->operands[2].isConstant()
2149                         ? Operand::c32(uint32_t(instr->operands[2].constantValue64() >> (32 * i)))
2150                         : Operand(PhysReg{instr->operands[2].physReg() + i}, s1);
2151                   bld.writelane(bld.def(v1, instr->operands[0].physReg()), src,
2152                                 Operand::c32(instr->operands[1].constantValue() + i),
2153                                 instr->operands[0]);
2154                }
2155                break;
2156             }
2157             case aco_opcode::p_reload: {
2158                assert(instr->operands[0].regClass() == v1.as_linear());
2159                for (unsigned i = 0; i < instr->definitions[0].size(); i++)
2160                   bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
2161                                instr->operands[0],
2162                                Operand::c32(instr->operands[1].constantValue() + i));
2163                break;
2164             }
2165             case aco_opcode::p_as_uniform: {
2166                if (instr->operands[0].isConstant() ||
2167                    instr->operands[0].regClass().type() == RegType::sgpr) {
2168                   std::map<PhysReg, copy_operation> copy_operations;
2169                   copy_operations[instr->definitions[0].physReg()] = {
2170                      instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
2171                   handle_operands(copy_operations, &ctx, program->gfx_level, pi);
2172                } else {
2173                   assert(instr->operands[0].regClass().type() == RegType::vgpr);
2174                   assert(instr->definitions[0].regClass().type() == RegType::sgpr);
2175                   assert(instr->operands[0].size() == instr->definitions[0].size());
2176                   for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
2177                      bld.vop1(aco_opcode::v_readfirstlane_b32,
2178                               bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
2179                               Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
2180                   }
2181                }
2182                break;
2183             }
2184             case aco_opcode::p_bpermute: {
2185                if (ctx.program->gfx_level <= GFX7)
2186                   emit_gfx6_bpermute(program, instr, bld);
2187                else if (ctx.program->gfx_level >= GFX10 && ctx.program->wave_size == 64)
2188                   emit_gfx10_wave64_bpermute(program, instr, bld);
2189                else
2190                   unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
2191                break;
2192             }
2193             case aco_opcode::p_constaddr: {
2194                unsigned id = instr->definitions[0].tempId();
2195                PhysReg reg = instr->definitions[0].physReg();
2196                bld.sop1(aco_opcode::p_constaddr_getpc, instr->definitions[0], Operand::c32(id));
2197                bld.sop2(aco_opcode::p_constaddr_addlo, Definition(reg, s1), bld.def(s1, scc),
2198                         Operand(reg, s1), instr->operands[0], Operand::c32(id));
2199                /* s_addc_u32 not needed because the program is in a 32-bit VA range */
2200                break;
2201             }
2202             case aco_opcode::p_extract: {
2203                assert(instr->operands[1].isConstant());
2204                assert(instr->operands[2].isConstant());
2205                assert(instr->operands[3].isConstant());
2206                if (instr->definitions[0].regClass() == s1)
2207                   assert(instr->definitions.size() >= 2 && instr->definitions[1].physReg() == scc);
2208                Definition dst = instr->definitions[0];
2209                Operand op = instr->operands[0];
2210                unsigned bits = instr->operands[2].constantValue();
2211                unsigned index = instr->operands[1].constantValue();
2212                unsigned offset = index * bits;
2213                bool signext = !instr->operands[3].constantEquals(0);
2214 
               if (dst.regClass() == s1) {
                  if (offset == (32 - bits)) {
                     bld.sop2(signext ? aco_opcode::s_ashr_i32 : aco_opcode::s_lshr_b32, dst,
                              bld.def(s1, scc), op, Operand::c32(offset));
                  } else if (offset == 0 && signext && (bits == 8 || bits == 16)) {
                     bld.sop1(bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16,
                              dst, op);
                  } else {
                     bld.sop2(signext ? aco_opcode::s_bfe_i32 : aco_opcode::s_bfe_u32, dst,
                              bld.def(s1, scc), op, Operand::c32((bits << 16) | offset));
                  }
               } else if ((dst.regClass() == v1 && op.regClass() == v1) ||
                          ctx.program->gfx_level <= GFX7) {
                  assert(op.physReg().byte() == 0 && dst.physReg().byte() == 0);
                  if (offset == (32 - bits) && op.regClass() != s1) {
                     bld.vop2(signext ? aco_opcode::v_ashrrev_i32 : aco_opcode::v_lshrrev_b32, dst,
                              Operand::c32(offset), op);
                  } else {
                     bld.vop3(signext ? aco_opcode::v_bfe_i32 : aco_opcode::v_bfe_u32, dst, op,
                              Operand::c32(offset), Operand::c32(bits));
                  }
               } else {
                  assert(dst.regClass() == v2b || dst.regClass() == v1b || op.regClass() == v2b ||
                         op.regClass() == v1b);
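                  /* Sub-dword destination or source. GFX11 removed SDWA, so the extract
                   * is done with byte permutes (create_bperm): bperm_0 selects a zero
                   * byte and bperm_b1_sign/bperm_b3_sign replicate the sign bit of byte
                   * 1/3. GFX8-GFX10.3 use an SDWA mov with the right selector instead. */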
                  if (ctx.program->gfx_level >= GFX11) {
                     unsigned op_vgpr_byte = op.physReg().byte() + offset / 8;
                     unsigned sign_byte = op_vgpr_byte + bits / 8 - 1;

                     uint8_t swiz[4] = {4, 5, 6, 7};
                     swiz[dst.physReg().byte()] = op_vgpr_byte;
                     if (bits == 16)
                        swiz[dst.physReg().byte() + 1] = op_vgpr_byte + 1;
                     for (unsigned i = bits / 8; i < dst.bytes(); i++) {
                        uint8_t ext = bperm_0;
                        if (signext) {
                           if (sign_byte == 1)
                              ext = bperm_b1_sign;
                           else if (sign_byte == 3)
                              ext = bperm_b3_sign;
                           else /* replicate so sign-extension can be done later */
                              ext = sign_byte;
                        }
                        swiz[dst.physReg().byte() + i] = ext;
                     }
                     create_bperm(bld, swiz, dst, op);

                     if (signext && sign_byte != 3 && sign_byte != 1) {
                        assert(bits == 8);
                        assert(dst.regClass() == v2b || dst.regClass() == v1);
                        uint8_t ext_swiz[4] = {4, 5, 6, 7};
                        uint8_t ext = dst.physReg().byte() == 2 ? bperm_b7_sign : bperm_b5_sign;
                        memset(ext_swiz + dst.physReg().byte() + 1, ext, dst.bytes() - 1);
                        create_bperm(bld, ext_swiz, dst, Operand::zero());
                     }
                  } else {
                     SDWA_instruction& sdwa =
                        bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, op).instr->sdwa();
                     sdwa.sel[0] = SubdwordSel(bits / 8, offset / 8, signext);
                  }
               }
               break;
            }
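            /* p_insert: the counterpart of p_extract. Operand 0 is placed into the
             * bitfield of the definition selected by operand 1 (index) and operand 2
             * (width in bits). */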
            case aco_opcode::p_insert: {
               assert(instr->operands[1].isConstant());
               assert(instr->operands[2].isConstant());
               if (instr->definitions[0].regClass() == s1)
                  assert(instr->definitions.size() >= 2 && instr->definitions[1].physReg() == scc);
               Definition dst = instr->definitions[0];
               Operand op = instr->operands[0];
               unsigned bits = instr->operands[2].constantValue();
               unsigned index = instr->operands[1].constantValue();
               unsigned offset = index * bits;

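               /* SDWA is only available on GFX8-GFX10.3; GFX11 uses byte permutes or the
                * v_bfe/v_lshlrev fallback below instead. */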
               bool has_sdwa = program->gfx_level >= GFX8 && program->gfx_level < GFX11;
               if (dst.regClass() == s1) {
                  if (offset == (32 - bits)) {
                     bld.sop2(aco_opcode::s_lshl_b32, dst, bld.def(s1, scc), op,
                              Operand::c32(offset));
                  } else if (offset == 0) {
                     bld.sop2(aco_opcode::s_bfe_u32, dst, bld.def(s1, scc), op,
                              Operand::c32(bits << 16));
                  } else {
                     bld.sop2(aco_opcode::s_bfe_u32, dst, bld.def(s1, scc), op,
                              Operand::c32(bits << 16));
                     bld.sop2(aco_opcode::s_lshl_b32, dst, bld.def(s1, scc),
                              Operand(dst.physReg(), s1), Operand::c32(offset));
                  }
               } else if (dst.regClass() == v1 || !has_sdwa) {
                  if (offset == (dst.bytes() * 8u - bits) &&
                      (dst.regClass() == v1 || program->gfx_level <= GFX7)) {
                     bld.vop2(aco_opcode::v_lshlrev_b32, dst, Operand::c32(offset), op);
                  } else if (offset == 0 && (dst.regClass() == v1 || program->gfx_level <= GFX7)) {
                     bld.vop3(aco_opcode::v_bfe_u32, dst, op, Operand::zero(), Operand::c32(bits));
                  } else if (has_sdwa && (op.regClass() != s1 || program->gfx_level >= GFX9)) {
                     bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, op).instr->sdwa().dst_sel =
                        SubdwordSel(bits / 8, offset / 8, false);
                  } else if (program->gfx_level >= GFX11) {
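                     /* GFX11: zero every destination byte with the permute, then copy
                      * the source bytes to the requested offset. */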
                     uint8_t swiz[] = {4, 5, 6, 7};
                     for (unsigned i = 0; i < dst.bytes(); i++)
                        swiz[dst.physReg().byte() + i] = bperm_0;
                     for (unsigned i = 0; i < bits / 8; i++)
                        swiz[dst.physReg().byte() + i + offset / 8] = op.physReg().byte() + i;
                     create_bperm(bld, swiz, dst, op);
                  } else {
                     bld.vop3(aco_opcode::v_bfe_u32, dst, op, Operand::zero(), Operand::c32(bits));
                     bld.vop2(aco_opcode::v_lshlrev_b32, dst, Operand::c32(offset),
                              Operand(dst.physReg(), v1));
                  }
               } else {
                  assert(dst.regClass() == v2b);
                  bld.vop2_sdwa(aco_opcode::v_lshlrev_b32, dst, Operand::c32(offset), op)
                     .instr->sdwa()
                     .sel[1] = SubdwordSel::ubyte;
               }
               break;
            }
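            /* p_init_scratch: initialize FLAT_SCRATCH on GFX8-GFX10.3. For non-compute
             * stages the scratch base still has to be loaded from memory first; the
             * per-wave offset (operand 1) is then added and the result written either
             * through s_setreg (GFX10+) or directly into the flat_scr registers. */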
            case aco_opcode::p_init_scratch: {
               assert(program->gfx_level >= GFX8 && program->gfx_level <= GFX10_3);
               if (!program->config->scratch_bytes_per_wave)
                  break;

               Operand scratch_addr = instr->operands[0];
               Operand scratch_addr_lo(scratch_addr.physReg(), s1);
               if (program->stage.hw != HWStage::CS) {
                  bld.smem(aco_opcode::s_load_dwordx2, instr->definitions[0], scratch_addr,
                           Operand::zero());
                  scratch_addr_lo.setFixed(instr->definitions[0].physReg());
               }
               Operand scratch_addr_hi(scratch_addr_lo.physReg().advance(4), s1);

               /* Since we know what the high 16 bits of scratch_hi are, we can set all of
                * the high 16 bits in the same instruction that adds the carry.
                */
               uint32_t hi_add = 0xffff0000 - S_008F04_SWIZZLE_ENABLE_GFX6(1);

               if (program->gfx_level >= GFX10) {
                  Operand scratch_lo(instr->definitions[0].physReg(), s1);
                  Operand scratch_hi(instr->definitions[0].physReg().advance(4), s1);

                  bld.sop2(aco_opcode::s_add_u32, Definition(scratch_lo.physReg(), s1),
                           Definition(scc, s1), scratch_addr_lo, instr->operands[1]);
                  bld.sop2(aco_opcode::s_addc_u32, Definition(scratch_hi.physReg(), s1),
                           Definition(scc, s1), scratch_addr_hi, Operand::c32(hi_add),
                           Operand(scc, s1));

                  /* "((size - 1) << 11) | register" (FLAT_SCRATCH_LO/HI is encoded as register
                   * 20/21) */
                  bld.sopk(aco_opcode::s_setreg_b32, scratch_lo, (31 << 11) | 20);
                  bld.sopk(aco_opcode::s_setreg_b32, scratch_hi, (31 << 11) | 21);
               } else {
                  bld.sop2(aco_opcode::s_add_u32, Definition(flat_scr_lo, s1), Definition(scc, s1),
                           scratch_addr_lo, instr->operands[1]);
                  bld.sop2(aco_opcode::s_addc_u32, Definition(flat_scr_hi, s1), Definition(scc, s1),
                           scratch_addr_hi, Operand::c32(hi_add), Operand(scc, s1));
               }
               break;
            }
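            /* p_jump_to_epilog: tail-jump to a separately compiled epilog whose address
             * is passed in operand 0. */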
            case aco_opcode::p_jump_to_epilog: {
               bld.sop1(aco_opcode::s_setpc_b64, instr->operands[0]);
               break;
            }
            default: break;
            }
         } else if (instr->isBranch()) {
            Pseudo_branch_instruction* branch = &instr->branch();
            const uint32_t target = branch->target[0];
            const bool uniform_branch = !(branch->opcode == aco_opcode::p_cbranch_z &&
                                          branch->operands[0].physReg() == exec);

            /* Check whether the branch instruction can be removed.
             * Removing it is beneficial when executing the blocks between the branch and
             * its target with an empty exec mask is faster than the branch instruction
             * itself.
             */
            bool can_remove = block->index < target;
            unsigned num_scalar = 0;
            unsigned num_vector = 0;

            /* Check the instructions between branch and target */
            for (unsigned i = block->index + 1; i < branch->target[0]; i++) {
               /* Uniform conditional branches must not be ignored if they
                * are about to jump over actual instructions */
               if (uniform_branch && !program->blocks[i].instructions.empty())
                  can_remove = false;

               if (!can_remove)
                  break;

               for (aco_ptr<Instruction>& inst : program->blocks[i].instructions) {
                  if (inst->isSOPP()) {
                     /* Branches to the discard early-exit block, as well as loop breaks
                      * and continues, should work fine with an empty exec mask.
                      */
                     bool is_break_continue =
                        program->blocks[i].kind & (block_kind_break | block_kind_continue);
                     bool discard_early_exit =
                        discard_block && (unsigned)inst->sopp().block == discard_block->index;
                     if ((inst->opcode != aco_opcode::s_cbranch_scc0 &&
                          inst->opcode != aco_opcode::s_cbranch_scc1) ||
                         (!discard_early_exit && !is_break_continue))
                        can_remove = false;
                  } else if (inst->isSALU()) {
                     num_scalar++;
                  } else if (inst->isVALU() || inst->isVINTRP()) {
                     num_vector++;
                     /* VALU instructions which write SGPRs are always executed on GFX10+ */
                     if (ctx.program->gfx_level >= GFX10) {
                        for (Definition& def : inst->definitions) {
                           if (def.regClass().type() == RegType::sgpr)
                              num_scalar++;
                        }
                     }
                  } else if (inst->isVMEM() || inst->isFlatLike() || inst->isDS() ||
                             inst->isEXP()) {
                     // TODO: GFX6-9 can use vskip
                     can_remove = false;
                  } else if (inst->isSMEM()) {
                     /* SMEM instructions are at least as expensive as branches */
                     can_remove = false;
                  } else if (inst->isBarrier()) {
                     can_remove = false;
                  } else {
                     can_remove = false;
                     assert(false && "Pseudo instructions should be lowered by this point.");
                  }

                  /* Roughly estimate the cost of executing the skipped instructions with
                   * an empty exec mask; if it exceeds the cost of the branch itself, keep
                   * the branch. */
                  unsigned est_cycles;
                  if (ctx.program->gfx_level >= GFX10)
                     est_cycles = num_scalar * 2 + num_vector;
                  else
                     est_cycles = num_scalar * 4 + num_vector * 4;

                  if (est_cycles > 16)
                     can_remove = false;

                  if (!can_remove)
                     break;
               }
            }

            if (can_remove)
               continue;

            /* emit branch instruction */
            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == target);
               bld.sopp(aco_opcode::s_branch, branch->definitions[0], target);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == target);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->definitions[0], target);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->definitions[0], target);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->definitions[0], target);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == target);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->definitions[0], target);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->definitions[0], target);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->definitions[0], target);
               }
               break;
            default: unreachable("Unknown Pseudo branch instruction!");
            }

         } else if (instr->isReduction()) {
            Pseudo_reduction_instruction& reduce = instr->reduction();
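            /* Expand the reduction into a sequence of hardware instructions; the extra
             * operands/definitions are the clobberable vector/scalar temporaries
             * reserved for the expansion. */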
            emit_reduction(&ctx, reduce.opcode, reduce.reduce_op, reduce.cluster_size,
                           reduce.operands[1].physReg(),    // tmp
                           reduce.definitions[1].physReg(), // stmp
                           reduce.operands[2].physReg(),    // vtmp
                           reduce.definitions[2].physReg(), // sitmp
                           reduce.operands[0], reduce.definitions[0]);
         } else if (instr->isBarrier()) {
            Pseudo_barrier_instruction& barrier = instr->barrier();

            /* An execution scope larger than a workgroup isn't possible. Anything
             * smaller requires no instructions; the pseudo instruction would then only
             * have been included to control optimizations. */
            bool emit_s_barrier = barrier.exec_scope == scope_workgroup &&
                                  program->workgroup_size > program->wave_size;

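            /* Keep the pseudo barrier itself (later passes still use its sync info) and
             * only add a hardware s_barrier when the workgroup spans more than one
             * wave. */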
            bld.insert(std::move(instr));
            if (emit_s_barrier)
               bld.sopp(aco_opcode::s_barrier);
         } else if (instr->opcode == aco_opcode::p_cvt_f16_f32_rtne) {
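            /* This conversion must round to nearest-even regardless of the block's
             * rounding mode, so temporarily switch the mode, emit a plain
             * v_cvt_f16_f32 and restore the previous mode afterwards. */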
            float_mode new_mode = block->fp_mode;
            new_mode.round16_64 = fp_round_ne;
            bool set_round = new_mode.round != block->fp_mode.round;

            emit_set_mode(bld, new_mode, set_round, false);

            instr->opcode = aco_opcode::v_cvt_f16_f32;
            ctx.instructions.emplace_back(std::move(instr));

            emit_set_mode(bld, block->fp_mode, set_round, false);
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }
      block->instructions.swap(ctx.instructions);
   }
}

} // namespace aco