• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "aco_builder.h"
26 #include "aco_ir.h"
27 
28 #include "common/sid.h"
29 
30 #include "util/memstream.h"
31 
32 #include "ac_shader_util.h"
33 #include <algorithm>
34 #include <map>
35 #include <vector>
36 
37 namespace aco {
38 
/* Patch-site bookkeeping for lowered p_constaddr/p_resumeaddr sequences:
 * records dword positions in the output stream so the final addresses can be
 * fixed up after the whole program has been emitted.
 */
struct constaddr_info {
   unsigned getpc_end;   /* dword offset just past the lowered s_getpc_b64 */
   unsigned add_literal; /* dword offset of the literal of the lowered s_add_u32 */
};
43 
/* Mutable state threaded through assembly of one Program. */
struct asm_context {
   Program* program;
   enum amd_gfx_level gfx_level;
   /* Branch SOPPs whose target offsets must be fixed up later, paired with
    * the dword position at which each was emitted. */
   std::vector<std::pair<int, SOPP_instruction*>> branches;
   /* Patch sites keyed by the pseudo-instruction's constant id. */
   std::map<unsigned, constaddr_info> constaddrs;
   std::map<unsigned, constaddr_info> resumeaddrs;
   /* Output list of symbols (e.g. for p_load_symbol); may be NULL. */
   std::vector<struct aco_symbol>* symbols;
   Block* loop_header = NULL;
   /* Per-gfx-level opcode translation table, indexed by aco_opcode. */
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program_, std::vector<struct aco_symbol>* symbols_)
       : program(program_), gfx_level(program->gfx_level), symbols(symbols_)
   {
      /* Pick the encoding table matching the target generation. */
      if (gfx_level <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (gfx_level <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (gfx_level <= GFX10_3)
         opcode = &instr_info.opcode_gfx10[0];
      else if (gfx_level >= GFX11)
         opcode = &instr_info.opcode_gfx11[0];
   }

   /* Dword position of an open s_subvector_loop_begin, or -1 when none. */
   int subvector_begin_pos = -1;
};
70 
71 unsigned
get_mimg_nsa_dwords(const Instruction * instr)72 get_mimg_nsa_dwords(const Instruction* instr)
73 {
74    unsigned addr_dwords = instr->operands.size() - 3;
75    for (unsigned i = 1; i < addr_dwords; i++) {
76       if (instr->operands[3 + i].physReg() !=
77           instr->operands[3 + (i - 1)].physReg().advance(instr->operands[3 + (i - 1)].bytes()))
78          return DIV_ROUND_UP(addr_dwords - 1, 4);
79    }
80    return 0;
81 }
82 
83 unsigned
get_vopd_opy_start(const Instruction * instr)84 get_vopd_opy_start(const Instruction* instr)
85 {
86    switch (instr->opcode) {
87    case aco_opcode::v_dual_fmac_f32:
88    case aco_opcode::v_dual_fmaak_f32:
89    case aco_opcode::v_dual_fmamk_f32:
90    case aco_opcode::v_dual_cndmask_b32:
91    case aco_opcode::v_dual_dot2acc_f32_f16:
92    case aco_opcode::v_dual_dot2acc_f32_bf16: return 3;
93    case aco_opcode::v_dual_mov_b32: return 1;
94    default: return 2;
95    }
96 }
97 
98 uint32_t
reg(asm_context & ctx,PhysReg reg)99 reg(asm_context& ctx, PhysReg reg)
100 {
101    if (ctx.gfx_level >= GFX11) {
102       if (reg == m0)
103          return sgpr_null.reg();
104       else if (reg == sgpr_null)
105          return m0.reg();
106    }
107    return reg.reg();
108 }
109 
110 ALWAYS_INLINE uint32_t
reg(asm_context & ctx,Operand op,unsigned width=32)111 reg(asm_context& ctx, Operand op, unsigned width = 32)
112 {
113    return reg(ctx, op.physReg()) & BITFIELD_MASK(width);
114 }
115 
116 ALWAYS_INLINE uint32_t
reg(asm_context & ctx,Definition def,unsigned width=32)117 reg(asm_context& ctx, Definition def, unsigned width = 32)
118 {
119    return reg(ctx, def.physReg()) & BITFIELD_MASK(width);
120 }
121 
122 bool
needs_vop3_gfx11(asm_context & ctx,Instruction * instr)123 needs_vop3_gfx11(asm_context& ctx, Instruction* instr)
124 {
125    if (ctx.gfx_level <= GFX10_3)
126       return false;
127 
128    uint8_t mask = get_gfx11_true16_mask(instr->opcode);
129    if (!mask)
130       return false;
131 
132    u_foreach_bit (i, mask & 0x3) {
133       if (instr->operands[i].physReg().reg() >= (256 + 128))
134          return true;
135    }
136    if ((mask & 0x8) && instr->definitions[0].physReg().reg() >= (256 + 128))
137       return true;
138    return false;
139 }
140 
141 void
emit_instruction(asm_context & ctx,std::vector<uint32_t> & out,Instruction * instr)142 emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
143 {
144    /* lower remaining pseudo-instructions */
145    if (instr->opcode == aco_opcode::p_constaddr_getpc) {
146       ctx.constaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;
147 
148       instr->opcode = aco_opcode::s_getpc_b64;
149       instr->operands.pop_back();
150    } else if (instr->opcode == aco_opcode::p_constaddr_addlo) {
151       ctx.constaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;
152 
153       instr->opcode = aco_opcode::s_add_u32;
154       instr->operands.pop_back();
155       assert(instr->operands[1].isConstant());
156       /* in case it's an inline constant, make it a literal */
157       instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
158    } else if (instr->opcode == aco_opcode::p_resumeaddr_getpc) {
159       ctx.resumeaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;
160 
161       instr->opcode = aco_opcode::s_getpc_b64;
162       instr->operands.pop_back();
163    } else if (instr->opcode == aco_opcode::p_resumeaddr_addlo) {
164       ctx.resumeaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;
165 
166       instr->opcode = aco_opcode::s_add_u32;
167       instr->operands.pop_back();
168       assert(instr->operands[1].isConstant());
169       /* in case it's an inline constant, make it a literal */
170       instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
171    } else if (instr->opcode == aco_opcode::p_load_symbol) {
172       assert(instr->operands[0].isConstant());
173       assert(ctx.symbols);
174 
175       struct aco_symbol info;
176       info.id = (enum aco_symbol_id)instr->operands[0].constantValue();
177       info.offset = out.size() + 1;
178       ctx.symbols->push_back(info);
179 
180       instr->opcode = aco_opcode::s_mov_b32;
181       /* in case it's an inline constant, make it a literal */
182       instr->operands[0] = Operand::literal32(0);
183    }
184 
185    /* Promote VOP12C to VOP3 if necessary. */
186    if ((instr->isVOP1() || instr->isVOP2() || instr->isVOPC()) && !instr->isVOP3() &&
187        needs_vop3_gfx11(ctx, instr)) {
188       instr->format = asVOP3(instr->format);
189       if (instr->opcode == aco_opcode::v_fmaak_f16) {
190          instr->opcode = aco_opcode::v_fma_f16;
191          instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
192       } else if (instr->opcode == aco_opcode::v_fmamk_f16) {
193          instr->valu().swapOperands(1, 2);
194          instr->opcode = aco_opcode::v_fma_f16;
195          instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
196       }
197    }
198 
199    uint32_t opcode = ctx.opcode[(int)instr->opcode];
200    if (opcode == (uint32_t)-1) {
201       char* outmem;
202       size_t outsize;
203       struct u_memstream mem;
204       u_memstream_open(&mem, &outmem, &outsize);
205       FILE* const memf = u_memstream_get(&mem);
206 
207       fprintf(memf, "Unsupported opcode: ");
208       aco_print_instr(ctx.gfx_level, instr, memf);
209       u_memstream_close(&mem);
210 
211       aco_err(ctx.program, outmem);
212       free(outmem);
213 
214       abort();
215    }
216 
217    switch (instr->format) {
218    case Format::SOP2: {
219       uint32_t encoding = (0b10 << 30);
220       encoding |= opcode << 23;
221       encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
222       encoding |= instr->operands.size() >= 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
223       encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
224       out.push_back(encoding);
225       break;
226    }
227    case Format::SOPK: {
228       SOPK_instruction& sopk = instr->sopk();
229 
230       if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
231          assert(ctx.gfx_level >= GFX10);
232          assert(ctx.subvector_begin_pos == -1);
233          ctx.subvector_begin_pos = out.size();
234       } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
235          assert(ctx.gfx_level >= GFX10);
236          assert(ctx.subvector_begin_pos != -1);
237          /* Adjust s_subvector_loop_begin instruction to the address after the end  */
238          out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
239          /* Adjust s_subvector_loop_end instruction to the address after the beginning  */
240          sopk.imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
241          ctx.subvector_begin_pos = -1;
242       }
243 
244       uint32_t encoding = (0b1011 << 28);
245       encoding |= opcode << 23;
246       encoding |= !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc)
247                      ? reg(ctx, instr->definitions[0]) << 16
248                   : !instr->operands.empty() && instr->operands[0].physReg() <= 127
249                      ? reg(ctx, instr->operands[0]) << 16
250                      : 0;
251       encoding |= sopk.imm;
252       out.push_back(encoding);
253       break;
254    }
255    case Format::SOP1: {
256       uint32_t encoding = (0b101111101 << 23);
257       encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
258       encoding |= opcode << 8;
259       encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
260       out.push_back(encoding);
261       break;
262    }
263    case Format::SOPC: {
264       uint32_t encoding = (0b101111110 << 23);
265       encoding |= opcode << 16;
266       encoding |= instr->operands.size() == 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
267       encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
268       out.push_back(encoding);
269       break;
270    }
271    case Format::SOPP: {
272       SOPP_instruction& sopp = instr->sopp();
273       uint32_t encoding = (0b101111111 << 23);
274       encoding |= opcode << 16;
275       encoding |= (uint16_t)sopp.imm;
276       if (sopp.block != -1) {
277          sopp.pass_flags = 0;
278          ctx.branches.emplace_back(out.size(), &sopp);
279       }
280       out.push_back(encoding);
281       break;
282    }
283    case Format::SMEM: {
284       SMEM_instruction& smem = instr->smem();
285       bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
286       bool is_load = !instr->definitions.empty();
287       uint32_t encoding = 0;
288 
289       if (ctx.gfx_level <= GFX7) {
290          encoding = (0b11000 << 27);
291          encoding |= opcode << 22;
292          encoding |= instr->definitions.size() ? reg(ctx, instr->definitions[0]) << 15 : 0;
293          encoding |= instr->operands.size() ? (reg(ctx, instr->operands[0]) >> 1) << 9 : 0;
294          if (instr->operands.size() >= 2) {
295             if (!instr->operands[1].isConstant()) {
296                encoding |= reg(ctx, instr->operands[1]);
297             } else if (instr->operands[1].constantValue() >= 1024) {
298                encoding |= 255; /* SQ_SRC_LITERAL */
299             } else {
300                encoding |= instr->operands[1].constantValue() >> 2;
301                encoding |= 1 << 8;
302             }
303          }
304          out.push_back(encoding);
305          /* SMRD instructions can take a literal on GFX7 */
306          if (instr->operands.size() >= 2 && instr->operands[1].isConstant() &&
307              instr->operands[1].constantValue() >= 1024)
308             out.push_back(instr->operands[1].constantValue() >> 2);
309          return;
310       }
311 
312       if (ctx.gfx_level <= GFX9) {
313          encoding = (0b110000 << 26);
314          assert(!smem.dlc); /* Device-level coherent is not supported on GFX9 and lower */
315          encoding |= smem.nv ? 1 << 15 : 0;
316       } else {
317          encoding = (0b111101 << 26);
318          assert(!smem.nv); /* Non-volatile is not supported on GFX10 */
319          encoding |= smem.dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 14) : 0;
320       }
321 
322       encoding |= opcode << 18;
323       encoding |= smem.glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
324 
325       if (ctx.gfx_level <= GFX9) {
326          if (instr->operands.size() >= 2)
327             encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
328       }
329       if (ctx.gfx_level == GFX9) {
330          encoding |= soe ? 1 << 14 : 0;
331       }
332 
333       if (is_load || instr->operands.size() >= 3) { /* SDATA */
334          encoding |= (is_load ? reg(ctx, instr->definitions[0]) : reg(ctx, instr->operands[2]))
335                      << 6;
336       }
337       if (instr->operands.size() >= 1) { /* SBASE */
338          encoding |= reg(ctx, instr->operands[0]) >> 1;
339       }
340 
341       out.push_back(encoding);
342       encoding = 0;
343 
344       int32_t offset = 0;
345       uint32_t soffset =
346          ctx.gfx_level >= GFX10
347             ? reg(ctx, sgpr_null) /* On GFX10 this is disabled by specifying SGPR_NULL */
348             : 0;                  /* On GFX9, it is disabled by the SOE bit (and it's not present on
349                                      GFX8 and below) */
350       if (instr->operands.size() >= 2) {
351          const Operand& op_off1 = instr->operands[1];
352          if (ctx.gfx_level <= GFX9) {
353             offset = op_off1.isConstant() ? op_off1.constantValue() : reg(ctx, op_off1);
354          } else {
355             /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an
356              * SGPR */
357             if (op_off1.isConstant()) {
358                offset = op_off1.constantValue();
359             } else {
360                soffset = reg(ctx, op_off1);
361                assert(!soe); /* There is no place to put the other SGPR offset, if any */
362             }
363          }
364 
365          if (soe) {
366             const Operand& op_off2 = instr->operands.back();
367             assert(ctx.gfx_level >= GFX9); /* GFX8 and below don't support specifying a constant
368                                                and an SGPR at the same time */
369             assert(!op_off2.isConstant());
370             soffset = reg(ctx, op_off2);
371          }
372       }
373       encoding |= offset;
374       encoding |= soffset << 25;
375 
376       out.push_back(encoding);
377       return;
378    }
379    case Format::VOP2: {
380       VALU_instruction& valu = instr->valu();
381       uint32_t encoding = 0;
382       encoding |= opcode << 25;
383       encoding |= reg(ctx, instr->definitions[0], 8) << 17;
384       encoding |= (valu.opsel[3] ? 128 : 0) << 17;
385       encoding |= reg(ctx, instr->operands[1], 8) << 9;
386       encoding |= (valu.opsel[1] ? 128 : 0) << 9;
387       encoding |= reg(ctx, instr->operands[0]);
388       encoding |= valu.opsel[0] ? 128 : 0;
389       out.push_back(encoding);
390       break;
391    }
392    case Format::VOP1: {
393       VALU_instruction& valu = instr->valu();
394       uint32_t encoding = (0b0111111 << 25);
395       if (!instr->definitions.empty()) {
396          encoding |= reg(ctx, instr->definitions[0], 8) << 17;
397          encoding |= (valu.opsel[3] ? 128 : 0) << 17;
398       }
399       encoding |= opcode << 9;
400       if (!instr->operands.empty()) {
401          encoding |= reg(ctx, instr->operands[0]);
402          encoding |= valu.opsel[0] ? 128 : 0;
403       }
404       out.push_back(encoding);
405       break;
406    }
407    case Format::VOPC: {
408       VALU_instruction& valu = instr->valu();
409       uint32_t encoding = (0b0111110 << 25);
410       encoding |= opcode << 17;
411       encoding |= reg(ctx, instr->operands[1], 8) << 9;
412       encoding |= (valu.opsel[1] ? 128 : 0) << 9;
413       encoding |= reg(ctx, instr->operands[0]);
414       encoding |= valu.opsel[0] ? 128 : 0;
415       out.push_back(encoding);
416       break;
417    }
418    case Format::VINTRP: {
419       VINTRP_instruction& interp = instr->vintrp();
420       uint32_t encoding = 0;
421 
422       if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
423           instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
424           instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
425           instr->opcode == aco_opcode::v_interp_p2_f16) {
426          if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
427             encoding = (0b110100 << 26);
428          } else if (ctx.gfx_level >= GFX10) {
429             encoding = (0b110101 << 26);
430          } else {
431             unreachable("Unknown gfx_level.");
432          }
433 
434          encoding |= opcode << 16;
435          encoding |= reg(ctx, instr->definitions[0], 8);
436          out.push_back(encoding);
437 
438          encoding = 0;
439          encoding |= interp.attribute;
440          encoding |= interp.component << 6;
441          encoding |= reg(ctx, instr->operands[0]) << 9;
442          if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
443              instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
444              instr->opcode == aco_opcode::v_interp_p1lv_f16) {
445             encoding |= reg(ctx, instr->operands[2]) << 18;
446          }
447          out.push_back(encoding);
448       } else {
449          if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
450             encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
451          } else {
452             encoding = (0b110010 << 26);
453          }
454 
455          assert(encoding);
456          encoding |= reg(ctx, instr->definitions[0], 8) << 18;
457          encoding |= opcode << 16;
458          encoding |= interp.attribute << 10;
459          encoding |= interp.component << 8;
460          if (instr->opcode == aco_opcode::v_interp_mov_f32)
461             encoding |= (0x3 & instr->operands[0].constantValue());
462          else
463             encoding |= reg(ctx, instr->operands[0], 8);
464          out.push_back(encoding);
465       }
466       break;
467    }
468    case Format::VINTERP_INREG: {
469       VINTERP_inreg_instruction& interp = instr->vinterp_inreg();
470       uint32_t encoding = (0b11001101 << 24);
471       encoding |= reg(ctx, instr->definitions[0], 8);
472       encoding |= (uint32_t)interp.wait_exp << 8;
473       encoding |= (uint32_t)interp.opsel << 11;
474       encoding |= (uint32_t)interp.clamp << 15;
475       encoding |= opcode << 16;
476       out.push_back(encoding);
477 
478       encoding = 0;
479       for (unsigned i = 0; i < instr->operands.size(); i++)
480          encoding |= reg(ctx, instr->operands[i]) << (i * 9);
481       for (unsigned i = 0; i < 3; i++)
482          encoding |= interp.neg[i] << (29 + i);
483       out.push_back(encoding);
484       break;
485    }
486    case Format::VOPD: {
487       VOPD_instruction& vopd = instr->vopd();
488       uint32_t encoding = (0b110010 << 26);
489       encoding |= reg(ctx, instr->operands[0]);
490       if (instr->opcode != aco_opcode::v_dual_mov_b32)
491          encoding |= reg(ctx, instr->operands[1], 8) << 9;
492       encoding |= (uint32_t)ctx.opcode[(int)vopd.opy] << 17;
493       encoding |= opcode << 22;
494       out.push_back(encoding);
495 
496       unsigned opy_start = get_vopd_opy_start(instr);
497 
498       encoding = reg(ctx, instr->operands[opy_start]);
499       if (vopd.opy != aco_opcode::v_dual_mov_b32)
500          encoding |= reg(ctx, instr->operands[opy_start + 1], 8) << 9;
501       encoding |= (reg(ctx, instr->definitions[1], 8) >> 1) << 17;
502       encoding |= reg(ctx, instr->definitions[0], 8) << 24;
503       out.push_back(encoding);
504       break;
505    }
506    case Format::DS: {
507       DS_instruction& ds = instr->ds();
508       uint32_t encoding = (0b110110 << 26);
509       if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
510          encoding |= opcode << 17;
511          encoding |= (ds.gds ? 1 : 0) << 16;
512       } else {
513          encoding |= opcode << 18;
514          encoding |= (ds.gds ? 1 : 0) << 17;
515       }
516       encoding |= ((0xFF & ds.offset1) << 8);
517       encoding |= (0xFFFF & ds.offset0);
518       out.push_back(encoding);
519       encoding = 0;
520       if (!instr->definitions.empty())
521          encoding |= reg(ctx, instr->definitions[0], 8) << 24;
522       if (instr->operands.size() >= 3 && instr->operands[2].physReg() != m0)
523          encoding |= reg(ctx, instr->operands[2], 8) << 16;
524       if (instr->operands.size() >= 2 && instr->operands[1].physReg() != m0)
525          encoding |= reg(ctx, instr->operands[1], 8) << 8;
526       if (!instr->operands[0].isUndefined())
527          encoding |= reg(ctx, instr->operands[0], 8);
528       out.push_back(encoding);
529       break;
530    }
531    case Format::LDSDIR: {
532       LDSDIR_instruction& dir = instr->ldsdir();
533       uint32_t encoding = (0b11001110 << 24);
534       encoding |= opcode << 20;
535       encoding |= (uint32_t)dir.wait_vdst << 16;
536       encoding |= (uint32_t)dir.attr << 10;
537       encoding |= (uint32_t)dir.attr_chan << 8;
538       encoding |= reg(ctx, instr->definitions[0], 8);
539       out.push_back(encoding);
540       break;
541    }
542    case Format::MUBUF: {
543       MUBUF_instruction& mubuf = instr->mubuf();
544       uint32_t encoding = (0b111000 << 26);
545       if (ctx.gfx_level >= GFX11 && mubuf.lds) /* GFX11 has separate opcodes for LDS loads */
546          opcode = opcode == 0 ? 0x32 : (opcode + 0x1d);
547       else
548          encoding |= (mubuf.lds ? 1 : 0) << 16;
549       encoding |= opcode << 18;
550       encoding |= (mubuf.glc ? 1 : 0) << 14;
551       if (ctx.gfx_level <= GFX10_3)
552          encoding |= (mubuf.idxen ? 1 : 0) << 13;
553       assert(!mubuf.addr64 || ctx.gfx_level <= GFX7);
554       if (ctx.gfx_level == GFX6 || ctx.gfx_level == GFX7)
555          encoding |= (mubuf.addr64 ? 1 : 0) << 15;
556       if (ctx.gfx_level <= GFX10_3)
557          encoding |= (mubuf.offen ? 1 : 0) << 12;
558       if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
559          assert(!mubuf.dlc); /* Device-level coherent is not supported on GFX9 and lower */
560          encoding |= (mubuf.slc ? 1 : 0) << 17;
561       } else if (ctx.gfx_level >= GFX11) {
562          encoding |= (mubuf.slc ? 1 : 0) << 12;
563          encoding |= (mubuf.dlc ? 1 : 0) << 13;
564       } else if (ctx.gfx_level >= GFX10) {
565          encoding |= (mubuf.dlc ? 1 : 0) << 15;
566       }
567       encoding |= 0x0FFF & mubuf.offset;
568       out.push_back(encoding);
569       encoding = 0;
570       if (ctx.gfx_level <= GFX7 || (ctx.gfx_level >= GFX10 && ctx.gfx_level <= GFX10_3)) {
571          encoding |= (mubuf.slc ? 1 : 0) << 22;
572       }
573       encoding |= reg(ctx, instr->operands[2]) << 24;
574       if (ctx.gfx_level >= GFX11) {
575          encoding |= (mubuf.tfe ? 1 : 0) << 21;
576          encoding |= (mubuf.offen ? 1 : 0) << 22;
577          encoding |= (mubuf.idxen ? 1 : 0) << 23;
578       } else {
579          encoding |= (mubuf.tfe ? 1 : 0) << 23;
580       }
581       encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
582       if (instr->operands.size() > 3 && !mubuf.lds)
583          encoding |= reg(ctx, instr->operands[3], 8) << 8;
584       else if (!mubuf.lds)
585          encoding |= reg(ctx, instr->definitions[0], 8) << 8;
586       encoding |= reg(ctx, instr->operands[1], 8);
587       out.push_back(encoding);
588       break;
589    }
590    case Format::MTBUF: {
591       MTBUF_instruction& mtbuf = instr->mtbuf();
592 
593       uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);
594       uint32_t encoding = (0b111010 << 26);
595       assert(img_format <= 0x7F);
596       assert(!mtbuf.dlc || ctx.gfx_level >= GFX10);
597       if (ctx.gfx_level >= GFX11) {
598          encoding |= (mtbuf.slc ? 1 : 0) << 12;
599          encoding |= (mtbuf.dlc ? 1 : 0) << 13;
600       } else {
601          /* DLC bit replaces one bit of the OPCODE on GFX10 */
602          encoding |= (mtbuf.dlc ? 1 : 0) << 15;
603       }
604       if (ctx.gfx_level <= GFX10_3) {
605          encoding |= (mtbuf.idxen ? 1 : 0) << 13;
606          encoding |= (mtbuf.offen ? 1 : 0) << 12;
607       }
608       encoding |= (mtbuf.glc ? 1 : 0) << 14;
609       encoding |= 0x0FFF & mtbuf.offset;
610       encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
611 
612       if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9 || ctx.gfx_level >= GFX11) {
613          encoding |= opcode << 15;
614       } else {
615          encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
616       }
617 
618       out.push_back(encoding);
619       encoding = 0;
620 
621       encoding |= reg(ctx, instr->operands[2]) << 24;
622       if (ctx.gfx_level >= GFX11) {
623          encoding |= (mtbuf.tfe ? 1 : 0) << 21;
624          encoding |= (mtbuf.offen ? 1 : 0) << 22;
625          encoding |= (mtbuf.idxen ? 1 : 0) << 23;
626       } else {
627          encoding |= (mtbuf.tfe ? 1 : 0) << 23;
628          encoding |= (mtbuf.slc ? 1 : 0) << 22;
629       }
630       encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
631       if (instr->operands.size() > 3)
632          encoding |= reg(ctx, instr->operands[3], 8) << 8;
633       else
634          encoding |= reg(ctx, instr->definitions[0], 8) << 8;
635       encoding |= reg(ctx, instr->operands[1], 8);
636 
637       if (ctx.gfx_level >= GFX10) {
638          encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
639       }
640 
641       out.push_back(encoding);
642       break;
643    }
644    case Format::MIMG: {
645       unsigned nsa_dwords = get_mimg_nsa_dwords(instr);
646       assert(!nsa_dwords || ctx.gfx_level >= GFX10);
647 
648       MIMG_instruction& mimg = instr->mimg();
649       uint32_t encoding = (0b111100 << 26);
650       if (ctx.gfx_level >= GFX11) { /* GFX11: rearranges most fields */
651          assert(nsa_dwords <= 1);
652          encoding |= nsa_dwords;
653          encoding |= mimg.dim << 2;
654          encoding |= mimg.unrm ? 1 << 7 : 0;
655          encoding |= (0xF & mimg.dmask) << 8;
656          encoding |= mimg.slc ? 1 << 12 : 0;
657          encoding |= mimg.dlc ? 1 << 13 : 0;
658          encoding |= mimg.glc ? 1 << 14 : 0;
659          encoding |= mimg.r128 ? 1 << 15 : 0;
660          encoding |= mimg.a16 ? 1 << 16 : 0;
661          encoding |= mimg.d16 ? 1 << 17 : 0;
662          encoding |= (opcode & 0xFF) << 18;
663       } else {
664          encoding |= mimg.slc ? 1 << 25 : 0;
665          encoding |= (opcode & 0x7f) << 18;
666          encoding |= (opcode >> 7) & 1;
667          encoding |= mimg.lwe ? 1 << 17 : 0;
668          encoding |= mimg.tfe ? 1 << 16 : 0;
669          encoding |= mimg.glc ? 1 << 13 : 0;
670          encoding |= mimg.unrm ? 1 << 12 : 0;
671          if (ctx.gfx_level <= GFX9) {
672             assert(!mimg.dlc); /* Device-level coherent is not supported on GFX9 and lower */
673             assert(!mimg.r128);
674             encoding |= mimg.a16 ? 1 << 15 : 0;
675             encoding |= mimg.da ? 1 << 14 : 0;
676          } else {
677             encoding |= mimg.r128
678                            ? 1 << 15
679                            : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
680             encoding |= nsa_dwords << 1;
681             encoding |= mimg.dim << 3; /* GFX10: dimensionality instead of declare array */
682             encoding |= mimg.dlc ? 1 << 7 : 0;
683          }
684          encoding |= (0xF & mimg.dmask) << 8;
685       }
686       out.push_back(encoding);
687 
688       encoding = reg(ctx, instr->operands[3], 8); /* VADDR */
689       if (!instr->definitions.empty()) {
690          encoding |= reg(ctx, instr->definitions[0], 8) << 8; /* VDATA */
691       } else if (!instr->operands[2].isUndefined()) {
692          encoding |= reg(ctx, instr->operands[2], 8) << 8; /* VDATA */
693       }
694       encoding |= (0x1F & (reg(ctx, instr->operands[0]) >> 2)) << 16; /* T# (resource) */
695 
696       assert(!mimg.d16 || ctx.gfx_level >= GFX9);
697       if (ctx.gfx_level >= GFX11) {
698          if (!instr->operands[1].isUndefined())
699             encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 26; /* sampler */
700 
701          encoding |= mimg.tfe ? 1 << 21 : 0;
702          encoding |= mimg.lwe ? 1 << 22 : 0;
703       } else {
704          if (!instr->operands[1].isUndefined())
705             encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 21; /* sampler */
706 
707          encoding |= mimg.d16 ? 1 << 31 : 0;
708          if (ctx.gfx_level >= GFX10) {
709             /* GFX10: A16 still exists, but is in a different place */
710             encoding |= mimg.a16 ? 1 << 30 : 0;
711          }
712       }
713 
714       out.push_back(encoding);
715 
716       if (nsa_dwords) {
717          out.resize(out.size() + nsa_dwords);
718          std::vector<uint32_t>::iterator nsa = std::prev(out.end(), nsa_dwords);
719          for (unsigned i = 0; i < instr->operands.size() - 4u; i++)
720             nsa[i / 4] |= reg(ctx, instr->operands[4 + i], 8) << (i % 4 * 8);
721       }
722       break;
723    }
724    case Format::FLAT:
725    case Format::SCRATCH:
726    case Format::GLOBAL: {
727       FLAT_instruction& flat = instr->flatlike();
728       uint32_t encoding = (0b110111 << 26);
729       encoding |= opcode << 18;
730       if (ctx.gfx_level == GFX9 || ctx.gfx_level >= GFX11) {
731          if (instr->isFlat())
732             assert(flat.offset <= 0xfff);
733          else
734             assert(flat.offset >= -4096 && flat.offset < 4096);
735          encoding |= flat.offset & 0x1fff;
736       } else if (ctx.gfx_level <= GFX8 || instr->isFlat()) {
737          /* GFX10 has a 12-bit immediate OFFSET field,
738           * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
739           */
740          assert(flat.offset == 0);
741       } else {
742          assert(flat.offset >= -2048 && flat.offset <= 2047);
743          encoding |= flat.offset & 0xfff;
744       }
745       if (instr->isScratch())
746          encoding |= 1 << (ctx.gfx_level >= GFX11 ? 16 : 14);
747       else if (instr->isGlobal())
748          encoding |= 2 << (ctx.gfx_level >= GFX11 ? 16 : 14);
749       encoding |= flat.lds ? 1 << 13 : 0;
750       encoding |= flat.glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
751       encoding |= flat.slc ? 1 << (ctx.gfx_level >= GFX11 ? 15 : 17) : 0;
752       if (ctx.gfx_level >= GFX10) {
753          assert(!flat.nv);
754          encoding |= flat.dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 12) : 0;
755       } else {
756          assert(!flat.dlc);
757       }
758       out.push_back(encoding);
759       encoding = reg(ctx, instr->operands[0], 8);
760       if (!instr->definitions.empty())
761          encoding |= reg(ctx, instr->definitions[0], 8) << 24;
762       if (instr->operands.size() >= 3)
763          encoding |= reg(ctx, instr->operands[2], 8) << 8;
764       if (!instr->operands[1].isUndefined()) {
765          assert(ctx.gfx_level >= GFX10 || instr->operands[1].physReg() != 0x7F);
766          assert(instr->format != Format::FLAT);
767          encoding |= reg(ctx, instr->operands[1], 8) << 16;
768       } else if (instr->format != Format::FLAT ||
769                  ctx.gfx_level >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
770          /* For GFX10.3 scratch, 0x7F disables both ADDR and SADDR, unlike sgpr_null, which only
771           * disables SADDR.
772           */
773          if (ctx.gfx_level <= GFX9 ||
774              (instr->format == Format::SCRATCH && instr->operands[0].isUndefined()))
775             encoding |= 0x7F << 16;
776          else
777             encoding |= reg(ctx, sgpr_null) << 16;
778       }
779       if (ctx.gfx_level >= GFX11 && instr->isScratch())
780          encoding |= !instr->operands[0].isUndefined() ? 1 << 23 : 0;
781       else
782          encoding |= flat.nv ? 1 << 23 : 0;
783       out.push_back(encoding);
784       break;
785    }
786    case Format::EXP: {
787       Export_instruction& exp = instr->exp();
788       uint32_t encoding;
789       if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
790          encoding = (0b110001 << 26);
791       } else {
792          encoding = (0b111110 << 26);
793       }
794 
795       if (ctx.gfx_level >= GFX11) {
796          encoding |= exp.row_en ? 0b1 << 13 : 0;
797       } else {
798          encoding |= exp.valid_mask ? 0b1 << 12 : 0;
799          encoding |= exp.compressed ? 0b1 << 10 : 0;
800       }
801       encoding |= exp.done ? 0b1 << 11 : 0;
802       encoding |= exp.dest << 4;
803       encoding |= exp.enabled_mask;
804       out.push_back(encoding);
805       encoding = reg(ctx, exp.operands[0], 8);
806       encoding |= reg(ctx, exp.operands[1], 8) << 8;
807       encoding |= reg(ctx, exp.operands[2], 8) << 16;
808       encoding |= reg(ctx, exp.operands[3], 8) << 24;
809       out.push_back(encoding);
810       break;
811    }
812    case Format::PSEUDO:
813    case Format::PSEUDO_BARRIER:
814       if (instr->opcode != aco_opcode::p_unit_test)
815          unreachable("Pseudo instructions should be lowered before assembly.");
816       break;
817    default:
818       if (instr->isDPP16()) {
819          assert(ctx.gfx_level >= GFX8);
820          DPP16_instruction& dpp = instr->dpp16();
821 
822          /* first emit the instruction without the DPP operand */
823          Operand dpp_op = instr->operands[0];
824          instr->operands[0] = Operand(PhysReg{250}, v1);
825          instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP16);
826          emit_instruction(ctx, out, instr);
827          uint32_t encoding = (0xF & dpp.row_mask) << 28;
828          encoding |= (0xF & dpp.bank_mask) << 24;
829          encoding |= dpp.abs[1] << 23;
830          encoding |= dpp.neg[1] << 22;
831          encoding |= dpp.abs[0] << 21;
832          encoding |= dpp.neg[0] << 20;
833          encoding |= dpp.fetch_inactive << 18;
834          encoding |= dpp.bound_ctrl << 19;
835          encoding |= dpp.dpp_ctrl << 8;
836          encoding |= reg(ctx, dpp_op, 8);
837          encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
838          out.push_back(encoding);
839          return;
840       } else if (instr->isDPP8()) {
841          assert(ctx.gfx_level >= GFX10);
842          DPP8_instruction& dpp = instr->dpp8();
843 
844          /* first emit the instruction without the DPP operand */
845          Operand dpp_op = instr->operands[0];
846          instr->operands[0] = Operand(PhysReg{233u + dpp.fetch_inactive}, v1);
847          instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP8);
848          emit_instruction(ctx, out, instr);
849          uint32_t encoding = reg(ctx, dpp_op, 8);
850          encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
851          encoding |= dpp.lane_sel << 8;
852          out.push_back(encoding);
853          return;
854       } else if (instr->isVOP3()) {
855          VALU_instruction& vop3 = instr->valu();
856 
857          if (instr->isVOP2()) {
858             opcode = opcode + 0x100;
859          } else if (instr->isVOP1()) {
860             if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9)
861                opcode = opcode + 0x140;
862             else
863                opcode = opcode + 0x180;
864          } else if (instr->isVOPC()) {
865             opcode = opcode + 0x0;
866          } else if (instr->isVINTRP()) {
867             opcode = opcode + 0x270;
868          }
869 
870          uint32_t encoding;
871          if (ctx.gfx_level <= GFX9) {
872             encoding = (0b110100 << 26);
873          } else if (ctx.gfx_level >= GFX10) {
874             encoding = (0b110101 << 26);
875          } else {
876             unreachable("Unknown gfx_level.");
877          }
878 
879          if (ctx.gfx_level <= GFX7) {
880             encoding |= opcode << 17;
881             encoding |= (vop3.clamp ? 1 : 0) << 11;
882          } else {
883             encoding |= opcode << 16;
884             encoding |= (vop3.clamp ? 1 : 0) << 15;
885          }
886          encoding |= vop3.opsel << 11;
887          for (unsigned i = 0; i < 3; i++)
888             encoding |= vop3.abs[i] << (8 + i);
889          /* On GFX9 and older, v_cmpx implicitly writes exec besides writing an SGPR pair.
890           * On GFX10 and newer, v_cmpx always writes just exec.
891           */
892          if (instr->definitions.size() == 2 && instr->isVOPC())
893             assert(ctx.gfx_level <= GFX9 && instr->definitions[1].physReg() == exec);
894          else if (instr->definitions.size() == 2)
895             encoding |= reg(ctx, instr->definitions[1]) << 8;
896          encoding |= reg(ctx, instr->definitions[0], 8);
897          out.push_back(encoding);
898          encoding = 0;
899          if (instr->opcode == aco_opcode::v_interp_mov_f32) {
900             encoding = 0x3 & instr->operands[0].constantValue();
901          } else if (instr->opcode == aco_opcode::v_writelane_b32_e64) {
902             encoding |= reg(ctx, instr->operands[0]) << 0;
903             encoding |= reg(ctx, instr->operands[1]) << 9;
904             /* Encoding src2 works fine with hardware but breaks some disassemblers. */
905          } else {
906             for (unsigned i = 0; i < instr->operands.size(); i++)
907                encoding |= reg(ctx, instr->operands[i]) << (i * 9);
908          }
909          encoding |= vop3.omod << 27;
910          for (unsigned i = 0; i < 3; i++)
911             encoding |= vop3.neg[i] << (29 + i);
912          out.push_back(encoding);
913 
914       } else if (instr->isVOP3P()) {
915          VALU_instruction& vop3 = instr->valu();
916 
917          uint32_t encoding;
918          if (ctx.gfx_level == GFX9) {
919             encoding = (0b110100111 << 23);
920          } else if (ctx.gfx_level >= GFX10) {
921             encoding = (0b110011 << 26);
922          } else {
923             unreachable("Unknown gfx_level.");
924          }
925 
926          encoding |= opcode << 16;
927          encoding |= (vop3.clamp ? 1 : 0) << 15;
928          encoding |= vop3.opsel_lo << 11;
929          encoding |= ((vop3.opsel_hi & 0x4) ? 1 : 0) << 14;
930          for (unsigned i = 0; i < 3; i++)
931             encoding |= vop3.neg_hi[i] << (8 + i);
932          encoding |= reg(ctx, instr->definitions[0], 8);
933          out.push_back(encoding);
934          encoding = 0;
935          for (unsigned i = 0; i < instr->operands.size(); i++)
936             encoding |= reg(ctx, instr->operands[i]) << (i * 9);
937          encoding |= (vop3.opsel_hi & 0x3) << 27;
938          for (unsigned i = 0; i < 3; i++)
939             encoding |= vop3.neg_lo[i] << (29 + i);
940          out.push_back(encoding);
941       } else if (instr->isSDWA()) {
942          assert(ctx.gfx_level >= GFX8 && ctx.gfx_level < GFX11);
943          SDWA_instruction& sdwa = instr->sdwa();
944 
945          /* first emit the instruction without the SDWA operand */
946          Operand sdwa_op = instr->operands[0];
947          instr->operands[0] = Operand(PhysReg{249}, v1);
948          instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::SDWA);
949          emit_instruction(ctx, out, instr);
950 
951          uint32_t encoding = 0;
952 
953          if (instr->isVOPC()) {
954             if (instr->definitions[0].physReg() !=
955                 (ctx.gfx_level >= GFX10 && is_cmpx(instr->opcode) ? exec : vcc)) {
956                encoding |= reg(ctx, instr->definitions[0]) << 8;
957                encoding |= 1 << 15;
958             }
959             encoding |= (sdwa.clamp ? 1 : 0) << 13;
960          } else {
961             encoding |= sdwa.dst_sel.to_sdwa_sel(instr->definitions[0].physReg().byte()) << 8;
962             uint32_t dst_u = sdwa.dst_sel.sign_extend() ? 1 : 0;
963             if (instr->definitions[0].bytes() < 4) /* dst_preserve */
964                dst_u = 2;
965             encoding |= dst_u << 11;
966             encoding |= (sdwa.clamp ? 1 : 0) << 13;
967             encoding |= sdwa.omod << 14;
968          }
969 
970          encoding |= sdwa.sel[0].to_sdwa_sel(sdwa_op.physReg().byte()) << 16;
971          encoding |= sdwa.sel[0].sign_extend() ? 1 << 19 : 0;
972          encoding |= sdwa.abs[0] << 21;
973          encoding |= sdwa.neg[0] << 20;
974 
975          if (instr->operands.size() >= 2) {
976             encoding |= sdwa.sel[1].to_sdwa_sel(instr->operands[1].physReg().byte()) << 24;
977             encoding |= sdwa.sel[1].sign_extend() ? 1 << 27 : 0;
978             encoding |= sdwa.abs[1] << 29;
979             encoding |= sdwa.neg[1] << 28;
980          }
981 
982          encoding |= reg(ctx, sdwa_op, 8);
983          encoding |= (sdwa_op.physReg() < 256) << 23;
984          if (instr->operands.size() >= 2)
985             encoding |= (instr->operands[1].physReg() < 256) << 31;
986          out.push_back(encoding);
987       } else {
988          unreachable("unimplemented instruction format");
989       }
990       break;
991    }
992 
993    /* append literal dword */
994    for (const Operand& op : instr->operands) {
995       if (op.isLiteral()) {
996          out.push_back(op.constantValue());
997          break;
998       }
999    }
1000 }
1001 
1002 void
emit_block(asm_context & ctx,std::vector<uint32_t> & out,Block & block)1003 emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
1004 {
1005    for (aco_ptr<Instruction>& instr : block.instructions) {
1006 #if 0
1007       int start_idx = out.size();
1008       std::cerr << "Encoding:\t" << std::endl;
1009       aco_print_instr(&*instr, stderr);
1010       std::cerr << std::endl;
1011 #endif
1012       emit_instruction(ctx, out, instr.get());
1013 #if 0
1014       for (int i = start_idx; i < out.size(); i++)
1015          std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
1016 #endif
1017    }
1018 }
1019 
1020 void
fix_exports(asm_context & ctx,std::vector<uint32_t> & out,Program * program)1021 fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
1022 {
1023    bool exported = false;
1024    for (Block& block : program->blocks) {
1025       if (!(block.kind & block_kind_export_end))
1026          continue;
1027       std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
1028       while (it != block.instructions.rend()) {
1029          if ((*it)->isEXP()) {
1030             Export_instruction& exp = (*it)->exp();
1031             if (program->stage.hw == AC_HW_VERTEX_SHADER ||
1032                 program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
1033                if (exp.dest >= V_008DFC_SQ_EXP_POS && exp.dest <= (V_008DFC_SQ_EXP_POS + 3)) {
1034                   exp.done = true;
1035                   exported = true;
1036                   break;
1037                }
1038             } else {
1039                exp.done = true;
1040                exp.valid_mask = true;
1041                exported = true;
1042                break;
1043             }
1044          } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec) {
1045             break;
1046          }
1047          ++it;
1048       }
1049    }
1050 
1051    /* GFX10+ FS may not export anything if no discard is used. */
1052    bool may_skip_export = program->stage.hw == AC_HW_PIXEL_SHADER && program->gfx_level >= GFX10;
1053 
1054    if (!exported && !may_skip_export) {
1055       /* Abort in order to avoid a GPU hang. */
1056       bool is_vertex_or_ngg = (program->stage.hw == AC_HW_VERTEX_SHADER ||
1057                                program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER);
1058       aco_err(program,
1059               "Missing export in %s shader:", is_vertex_or_ngg ? "vertex or NGG" : "fragment");
1060       aco_print_program(program, stderr);
1061       abort();
1062    }
1063 }
1064 
1065 static void
insert_code(asm_context & ctx,std::vector<uint32_t> & out,unsigned insert_before,unsigned insert_count,const uint32_t * insert_data)1066 insert_code(asm_context& ctx, std::vector<uint32_t>& out, unsigned insert_before,
1067             unsigned insert_count, const uint32_t* insert_data)
1068 {
1069    out.insert(out.begin() + insert_before, insert_data, insert_data + insert_count);
1070 
1071    /* Update the offset of each affected block */
1072    for (Block& block : ctx.program->blocks) {
1073       if (block.offset >= insert_before)
1074          block.offset += insert_count;
1075    }
1076 
1077    /* Find first branch after the inserted code */
1078    auto branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(),
1079                                  [insert_before](const auto& branch) -> bool
1080                                  { return (unsigned)branch.first >= insert_before; });
1081 
1082    /* Update the locations of branches */
1083    for (; branch_it != ctx.branches.end(); ++branch_it)
1084       branch_it->first += insert_count;
1085 
1086    /* Update the locations of p_constaddr instructions */
1087    for (auto& constaddr : ctx.constaddrs) {
1088       constaddr_info& info = constaddr.second;
1089       if (info.getpc_end >= insert_before)
1090          info.getpc_end += insert_count;
1091       if (info.add_literal >= insert_before)
1092          info.add_literal += insert_count;
1093    }
1094    for (auto& constaddr : ctx.resumeaddrs) {
1095       constaddr_info& info = constaddr.second;
1096       if (info.getpc_end >= insert_before)
1097          info.getpc_end += insert_count;
1098       if (info.add_literal >= insert_before)
1099          info.add_literal += insert_count;
1100    }
1101 
1102    if (ctx.symbols) {
1103       for (auto& symbol : *ctx.symbols) {
1104          if (symbol.offset >= insert_before)
1105             symbol.offset += insert_count;
1106       }
1107    }
1108 }
1109 
1110 static void
fix_branches_gfx10(asm_context & ctx,std::vector<uint32_t> & out)1111 fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
1112 {
1113    /* Branches with an offset of 0x3f are buggy on GFX10,
1114     * we workaround by inserting NOPs if needed.
1115     */
1116    bool gfx10_3f_bug = false;
1117 
1118    do {
1119       auto buggy_branch_it = std::find_if(
1120          ctx.branches.begin(), ctx.branches.end(),
1121          [&ctx](const auto& branch) -> bool {
1122             return ((int)ctx.program->blocks[branch.second->block].offset - branch.first - 1) ==
1123                    0x3f;
1124          });
1125 
1126       gfx10_3f_bug = buggy_branch_it != ctx.branches.end();
1127 
1128       if (gfx10_3f_bug) {
1129          /* Insert an s_nop after the branch */
1130          constexpr uint32_t s_nop_0 = 0xbf800000u;
1131          insert_code(ctx, out, buggy_branch_it->first + 1, 1, &s_nop_0);
1132       }
1133    } while (gfx10_3f_bug);
1134 }
1135 
/* Emit the instruction sequence used to replace a branch whose target is out
 * of range for the 16-bit SOPP immediate: an (optional) inverted conditional
 * branch that skips the sequence, then s_getpc_b64 + add + s_setpc_b64 to
 * jump via an absolute address. The add's literal is a placeholder that
 * fix_branches() patches once the final target offset is known; the position
 * of that literal within this sequence is recorded in branch->pass_flags.
 * NOTE(review): the 'backwards' parameter is not used in this body.
 */
void
emit_long_jump(asm_context& ctx, SOPP_instruction* branch, bool backwards,
               std::vector<uint32_t>& out)
{
   Builder bld(ctx.program);

   /* Pick a scratch SGPR pair to build the new PC in. */
   Definition def;
   if (branch->definitions.empty()) {
      assert(ctx.program->blocks[branch->block].kind & block_kind_discard_early_exit);
      def = Definition(PhysReg(0), s2); /* The discard early exit block doesn't use SGPRs. */
   } else {
      def = branch->definitions[0];
   }

   Definition def_tmp_lo(def.physReg(), s1);
   Operand op_tmp_lo(def.physReg(), s1);
   /* High half currently unused: no carry into the high dword is emitted,
    * see the comment on s_addc_u32 below. */
   Definition def_tmp_hi(def.physReg().advance(4), s1);
   Operand op_tmp_hi(def.physReg().advance(4), s1);

   aco_ptr<Instruction> instr;

   if (branch->opcode != aco_opcode::s_branch) {
      /* for conditional branches, skip the long jump if the condition is false */
      aco_opcode inv;
      switch (branch->opcode) {
      case aco_opcode::s_cbranch_scc0: inv = aco_opcode::s_cbranch_scc1; break;
      case aco_opcode::s_cbranch_scc1: inv = aco_opcode::s_cbranch_scc0; break;
      case aco_opcode::s_cbranch_vccz: inv = aco_opcode::s_cbranch_vccnz; break;
      case aco_opcode::s_cbranch_vccnz: inv = aco_opcode::s_cbranch_vccz; break;
      case aco_opcode::s_cbranch_execz: inv = aco_opcode::s_cbranch_execnz; break;
      case aco_opcode::s_cbranch_execnz: inv = aco_opcode::s_cbranch_execz; break;
      default: unreachable("Unhandled long jump.");
      }
      /* Skip over the 6-dword jump sequence emitted below
       * (getpc=1, addc+literal=2, bitcmp1=1, bitset0=1, setpc=1). */
      instr.reset(bld.sopp(inv, -1, 6));
      emit_instruction(ctx, out, instr.get());
   }

   /* create the new PC and stash SCC in the LSB */
   instr.reset(bld.sop1(aco_opcode::s_getpc_b64, def).instr);
   emit_instruction(ctx, out, instr.get());

   /* literal32(0) is a placeholder; fix_branches() overwrites it with the
    * byte offset from the end of the s_getpc_b64 to the target block. */
   instr.reset(
      bld.sop2(aco_opcode::s_addc_u32, def_tmp_lo, op_tmp_lo, Operand::literal32(0)).instr);
   emit_instruction(ctx, out, instr.get());
   /* Record where the literal sits: out.size() counts the dwords emitted so
    * far, so the literal is at pass_flags - 1 within this sequence. */
   branch->pass_flags = out.size();

   /* s_addc_u32 for high 32 bits not needed because the program is in a 32-bit VA range */

   /* restore SCC and clear the LSB of the new PC */
   instr.reset(bld.sopc(aco_opcode::s_bitcmp1_b32, def_tmp_lo, op_tmp_lo, Operand::zero()).instr);
   emit_instruction(ctx, out, instr.get());
   instr.reset(bld.sop1(aco_opcode::s_bitset0_b32, def_tmp_lo, Operand::zero()).instr);
   emit_instruction(ctx, out, instr.get());

   /* create the s_setpc_b64 to jump */
   instr.reset(bld.sop1(aco_opcode::s_setpc_b64, Operand(def.physReg(), s2)).instr);
   emit_instruction(ctx, out, instr.get());
}
1194 
/* Resolve all recorded branch targets now that block offsets are final.
 * Branches whose target does not fit into the signed 16-bit SOPP immediate
 * are rewritten in place as long jumps (see emit_long_jump); that insertion
 * shifts the code and can push other branches out of range, so the whole
 * pass repeats until it settles. */
void
fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
{
   bool repeat = false;
   do {
      repeat = false;

      if (ctx.gfx_level == GFX10)
         fix_branches_gfx10(ctx, out);

      for (std::pair<int, SOPP_instruction*>& branch : ctx.branches) {
         /* Offset in dwords from the dword following the branch to the target block. */
         int offset = (int)ctx.program->blocks[branch.second->block].offset - branch.first - 1;
         /* pass_flags != 0 means this branch was already converted to a long jump. */
         if ((offset < INT16_MIN || offset > INT16_MAX) && !branch.second->pass_flags) {
            /* Assemble the long-jump sequence into a scratch buffer, then
             * overwrite the original branch dword and splice in the rest. */
            std::vector<uint32_t> long_jump;
            bool backwards =
               ctx.program->blocks[branch.second->block].offset < (unsigned)branch.first;
            emit_long_jump(ctx, branch.second, backwards, long_jump);

            out[branch.first] = long_jump[0];
            insert_code(ctx, out, branch.first + 1, long_jump.size() - 1, long_jump.data() + 1);

            repeat = true;
            break;
         }

         if (branch.second->pass_flags) {
            /* Long jump: patch the s_addc_u32 literal with the byte offset
             * from the end of the s_getpc_b64 (pass_flags - 2 dwords into the
             * sequence) to the target block. */
            int after_getpc = branch.first + branch.second->pass_flags - 2;
            offset = (int)ctx.program->blocks[branch.second->block].offset - after_getpc;
            out[branch.first + branch.second->pass_flags - 1] = offset * 4;
         } else {
            /* Regular branch: write the dword offset into the low 16 bits of
             * the SOPP encoding. */
            out[branch.first] &= 0xffff0000u;
            out[branch.first] |= (uint16_t)offset;
         }
      }
   } while (repeat);
}
1231 
1232 void
fix_constaddrs(asm_context & ctx,std::vector<uint32_t> & out)1233 fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
1234 {
1235    for (auto& constaddr : ctx.constaddrs) {
1236       constaddr_info& info = constaddr.second;
1237       out[info.add_literal] += (out.size() - info.getpc_end) * 4u;
1238 
1239       if (ctx.symbols) {
1240          struct aco_symbol sym;
1241          sym.id = aco_symbol_const_data_addr;
1242          sym.offset = info.add_literal;
1243          ctx.symbols->push_back(sym);
1244       }
1245    }
1246    for (auto& addr : ctx.resumeaddrs) {
1247       constaddr_info& info = addr.second;
1248       const Block& block = ctx.program->blocks[out[info.add_literal]];
1249       assert(block.kind & block_kind_resume);
1250       out[info.add_literal] = (block.offset - info.getpc_end) * 4u;
1251    }
1252 }
1253 
/* Insert alignment padding (and, on GFX10.3+, instruction-prefetch hints)
 * before `block` where that benefits small loops, and cache-line-align resume
 * shader entry points. Called per block, before the block is emitted; relies
 * on ctx.loop_header remembering the innermost loop header seen so far.
 * Cache lines here are counted as 16 dwords (64 bytes). */
void
align_block(asm_context& ctx, std::vector<uint32_t>& code, Block& block)
{
   /* Blocks with block_kind_loop_exit might be eliminated after jump threading, so we instead find
    * loop exits using loop_nest_depth.
    */
   if (ctx.loop_header && !block.linear_preds.empty() &&
       block.loop_nest_depth < ctx.loop_header->loop_nest_depth) {
      Block* loop_header = ctx.loop_header;
      ctx.loop_header = NULL;
      std::vector<uint32_t> nops;

      /* Loop size in cache lines; block.offset is where the loop body ends. */
      const unsigned loop_num_cl = DIV_ROUND_UP(block.offset - loop_header->offset, 16);

      /* On GFX10.3+, change the prefetch mode if the loop fits into 2 or 3 cache lines.
       * Don't use the s_inst_prefetch instruction on GFX10 as it might cause hangs.
       */
      const bool change_prefetch =
         ctx.program->gfx_level >= GFX10_3 && loop_num_cl > 1 && loop_num_cl <= 3;

      if (change_prefetch) {
         Builder bld(ctx.program);
         int16_t prefetch_mode = loop_num_cl == 3 ? 0x1 : 0x2;
         aco_ptr<Instruction> instr(bld.sopp(aco_opcode::s_inst_prefetch, -1, prefetch_mode));
         /* Assemble the prefetch into a scratch buffer and splice it in right
          * before the loop header (insert_code also fixes up all offsets). */
         emit_instruction(ctx, nops, instr.get());
         insert_code(ctx, code, loop_header->offset, nops.size(), nops.data());

         /* Change prefetch mode back to default (0x3). */
         instr->sopp().imm = 0x3;
         emit_instruction(ctx, code, instr.get());
      }

      const unsigned loop_start_cl = loop_header->offset >> 4;
      const unsigned loop_end_cl = (block.offset - 1) >> 4;

      /* Align the loop if it fits into the fetched cache lines or if we can
       * reduce the number of cache lines with less than 8 NOPs.
       */
      const bool align_loop = loop_end_cl - loop_start_cl >= loop_num_cl &&
                              (loop_num_cl == 1 || change_prefetch || loop_header->offset % 16 > 8);

      if (align_loop) {
         /* Pad with s_nop up to the next cache-line boundary. */
         nops.clear();
         nops.resize(16 - (loop_header->offset % 16), 0xbf800000u);
         insert_code(ctx, code, loop_header->offset, nops.size(), nops.data());
      }
   }

   if (block.kind & block_kind_loop_header) {
      /* In case of nested loops, only handle the inner-most loops in order
       * to not break the alignment of inner loops by handling outer loops.
       * Also ignore loops without back-edge.
       */
      ctx.loop_header = block.linear_preds.size() > 1 ? &block : NULL;
   }

   /* align resume shaders with cache line */
   if (block.kind & block_kind_resume) {
      size_t cache_aligned = align(code.size(), 16);
      code.resize(cache_aligned, 0xbf800000u); /* s_nop 0 */
      block.offset = code.size();
   }
}
1317 
1318 unsigned
emit_program(Program * program,std::vector<uint32_t> & code,std::vector<struct aco_symbol> * symbols,bool append_endpgm)1319 emit_program(Program* program, std::vector<uint32_t>& code, std::vector<struct aco_symbol>* symbols,
1320              bool append_endpgm)
1321 {
1322    asm_context ctx(program, symbols);
1323 
1324    bool is_separately_compiled_ngg_vs_or_es =
1325       (program->stage.sw == SWStage::VS || program->stage.sw == SWStage::TES) &&
1326       program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER &&
1327       program->info.merged_shader_compiled_separately;
1328 
1329    /* Prolog has no exports. */
1330    if (!program->is_prolog && !program->info.has_epilog && !is_separately_compiled_ngg_vs_or_es &&
1331        (program->stage.hw == AC_HW_VERTEX_SHADER || program->stage.hw == AC_HW_PIXEL_SHADER ||
1332         program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER))
1333       fix_exports(ctx, code, program);
1334 
1335    for (Block& block : program->blocks) {
1336       block.offset = code.size();
1337       align_block(ctx, code, block);
1338       emit_block(ctx, code, block);
1339    }
1340 
1341    fix_branches(ctx, code);
1342 
1343    unsigned exec_size = code.size() * sizeof(uint32_t);
1344 
1345    /* Add end-of-code markers for the UMR disassembler. */
1346    if (append_endpgm)
1347       code.resize(code.size() + 5, 0xbf9f0000u);
1348 
1349    fix_constaddrs(ctx, code);
1350 
1351    while (program->constant_data.size() % 4u)
1352       program->constant_data.push_back(0);
1353    /* Copy constant data */
1354    code.insert(code.end(), (uint32_t*)program->constant_data.data(),
1355                (uint32_t*)(program->constant_data.data() + program->constant_data.size()));
1356 
1357    program->config->scratch_bytes_per_wave =
1358       align(program->config->scratch_bytes_per_wave, program->dev.scratch_alloc_granule);
1359 
1360    return exec_size;
1361 }
1362 
1363 } // namespace aco
1364