/*
 * Copyright © 2018 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include "util/memstream.h"

#include "ac_shader_util.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

namespace aco {

struct constaddr_info {
   unsigned getpc_end;
   unsigned add_literal;
};

struct branch_info {
   unsigned pos;
   unsigned target;
};

struct asm_context {
   Program* program;
   enum amd_gfx_level gfx_level;
   std::vector<branch_info> branches;
   std::map<unsigned, constaddr_info> constaddrs;
   std::map<unsigned, constaddr_info> resumeaddrs;
   std::vector<struct aco_symbol>* symbols;
   uint32_t loop_header = -1u;
   uint32_t loop_exit = 0u;
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program_, std::vector<struct aco_symbol>* symbols_)
       : program(program_), gfx_level(program->gfx_level), symbols(symbols_)
   {
      if (gfx_level <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (gfx_level <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (gfx_level <= GFX10_3)
         opcode = &instr_info.opcode_gfx10[0];
      else if (gfx_level <= GFX11_5)
         opcode = &instr_info.opcode_gfx11[0];
      else
         opcode = &instr_info.opcode_gfx12[0];
   }

   int subvector_begin_pos = -1;
};

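/* Note (derived from the logic below): the number of extra NSA dwords needed
 * for the MIMG address operands (operands[3..]). The first address stays in
 * the regular VADDR field; if the remaining addresses are not register-
 * contiguous, NSA encoding is used and each extra dword packs up to four
 * further 8-bit VGPR indices. Fully contiguous addresses need no NSA dwords.
 */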
unsigned
get_mimg_nsa_dwords(const Instruction* instr)
{
   unsigned addr_dwords = instr->operands.size() - 3;
   for (unsigned i = 1; i < addr_dwords; i++) {
      if (instr->operands[3 + i].physReg() !=
          instr->operands[3 + (i - 1)].physReg().advance(instr->operands[3 + (i - 1)].bytes()))
         return DIV_ROUND_UP(addr_dwords - 1, 4);
   }
   return 0;
}

unsigned
get_vopd_opy_start(const Instruction* instr)
{
   switch (instr->opcode) {
   case aco_opcode::v_dual_fmac_f32:
   case aco_opcode::v_dual_fmaak_f32:
   case aco_opcode::v_dual_fmamk_f32:
   case aco_opcode::v_dual_cndmask_b32:
   case aco_opcode::v_dual_dot2acc_f32_f16:
   case aco_opcode::v_dual_dot2acc_f32_bf16: return 3;
   case aco_opcode::v_dual_mov_b32: return 1;
   default: return 2;
   }
}

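/* GFX11 swapped the hardware encodings of m0 and SGPR_NULL, while the IR
 * keeps the pre-GFX11 numbering everywhere else; this helper exchanges the
 * two at encoding time so the rest of the backend never has to care.
 */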
uint32_t
reg(asm_context& ctx, PhysReg reg)
{
   if (ctx.gfx_level >= GFX11) {
      if (reg == m0)
         return sgpr_null.reg();
      else if (reg == sgpr_null)
         return m0.reg();
   }
   return reg.reg();
}

ALWAYS_INLINE uint32_t
reg(asm_context& ctx, Operand op, unsigned width = 32)
{
   return reg(ctx, op.physReg()) & BITFIELD_MASK(width);
}

ALWAYS_INLINE uint32_t
reg(asm_context& ctx, Definition def, unsigned width = 32)
{
   return reg(ctx, def.physReg()) & BITFIELD_MASK(width);
}

bool
needs_vop3_gfx11(asm_context& ctx, Instruction* instr)
{
   if (ctx.gfx_level <= GFX10_3)
      return false;

   uint8_t mask = get_gfx11_true16_mask(instr->opcode);
   if (!mask)
      return false;

   u_foreach_bit (i, mask & 0x3) {
      if (instr->operands[i].physReg().reg() >= (256 + 128))
         return true;
   }
   if ((mask & 0x8) && instr->definitions[0].physReg().reg() >= (256 + 128))
      return true;
   return false;
}

template <typename T>
uint32_t
get_gfx12_cpol(const T& instr)
{
   uint32_t scope = instr.cache.gfx12.scope;
   uint32_t th = instr.cache.gfx12.temporal_hint;
   return scope | (th << 2);
}

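/* SOP2 word layout (derived from the shifts below):
 * [31:30] = 0b10, [29:23] opcode, [22:16] SDST, [15:8] SSRC1, [7:0] SSRC0.
 */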
void
emit_sop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b10 << 30);
   encoding |= opcode << 23;
   encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
   encoding |= instr->operands.size() >= 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

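/* SOPK word layout (derived from the shifts below):
 * [31:28] = 0b1011, [27:23] opcode, [22:16] SDST, [15:0] SIMM16.
 * For s_subvector_loop_begin/end, SIMM16 is a relative dword offset that is
 * back-patched once both ends of the subvector loop have been emitted.
 */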
void
emit_sopk_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SALU_instruction& sopk = instr->salu();
   assert(sopk.imm <= UINT16_MAX);
   uint16_t imm = sopk.imm;

   if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
      assert(ctx.gfx_level >= GFX10);
      assert(ctx.subvector_begin_pos == -1);
      ctx.subvector_begin_pos = out.size();
   } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
      assert(ctx.gfx_level >= GFX10);
      assert(ctx.subvector_begin_pos != -1);
      /* Adjust s_subvector_loop_begin instruction to the address after the end */
      out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
      /* Adjust s_subvector_loop_end instruction to the address after the beginning */
      imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
      ctx.subvector_begin_pos = -1;
   }

   uint32_t encoding = (0b1011 << 28);
   encoding |= opcode << 23;
   encoding |= !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc)
                  ? reg(ctx, instr->definitions[0]) << 16
               : !instr->operands.empty() && instr->operands[0].physReg() <= 127
                  ? reg(ctx, instr->operands[0]) << 16
                  : 0;
   encoding |= imm;
   out.push_back(encoding);
}

void
emit_sop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b101111101 << 23);
   encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
   encoding |= opcode << 8;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

void
emit_sopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];

   uint32_t encoding = (0b101111110 << 23);
   encoding |= opcode << 16;
   encoding |= instr->operands.size() == 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
   encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
   out.push_back(encoding);
}

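/* For branch instructions, sopp.imm holds the target block index rather than
 * a displacement; the branch is recorded in ctx.branches and its SIMM16 field
 * is filled in later, once all block offsets are known.
 */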
void
emit_sopp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr,
                      bool force_imm = false)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SALU_instruction& sopp = instr->salu();

   uint32_t encoding = (0b101111111 << 23);
   encoding |= opcode << 16;

   if (!force_imm && instr_info.classes[(int)instr->opcode] == instr_class::branch) {
      ctx.branches.push_back({(unsigned)out.size(), sopp.imm});
   } else {
      assert(sopp.imm <= UINT16_MAX);
      encoding |= (uint16_t)sopp.imm;
   }
   out.push_back(encoding);
}

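/* SMEM offsets differ per generation (see the second dword below): GFX9 can
 * put either a constant or an SGPR in OFFSET (selected by the IMM bit), while
 * GFX10+ only allows constants in OFFSET, so an SGPR offset goes into SOFFSET
 * instead and an unused SOFFSET is tied off with SGPR_NULL.
 */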
void
emit_smem_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SMEM_instruction& smem = instr->smem();
   bool glc = smem.cache.value & ac_glc;
   bool dlc = smem.cache.value & ac_dlc;

   bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
   bool is_load = !instr->definitions.empty();
   uint32_t encoding = 0;

   if (ctx.gfx_level <= GFX7) {
      encoding = (0b11000 << 27);
      encoding |= opcode << 22;
      encoding |= instr->definitions.size() ? reg(ctx, instr->definitions[0]) << 15 : 0;
      encoding |= instr->operands.size() ? (reg(ctx, instr->operands[0]) >> 1) << 9 : 0;
      if (instr->operands.size() >= 2) {
         if (!instr->operands[1].isConstant()) {
            encoding |= reg(ctx, instr->operands[1]);
         } else if (instr->operands[1].constantValue() >= 1024) {
            encoding |= 255; /* SQ_SRC_LITERAL */
         } else {
            encoding |= instr->operands[1].constantValue() >> 2;
            encoding |= 1 << 8;
         }
      }
      out.push_back(encoding);
      /* SMRD instructions can take a literal on GFX7 */
      if (instr->operands.size() >= 2 && instr->operands[1].isConstant() &&
          instr->operands[1].constantValue() >= 1024)
         out.push_back(instr->operands[1].constantValue() >> 2);
      return;
   }

   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110000 << 26);
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      /* We don't use the NV bit. */
   } else {
      encoding = (0b111101 << 26);
      if (ctx.gfx_level <= GFX11_5)
         encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 14) : 0;
   }

   if (ctx.gfx_level <= GFX11_5) {
      encoding |= opcode << 18;
      encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   } else {
      encoding |= opcode << 13;
      encoding |= get_gfx12_cpol(smem) << 21;
   }

   if (ctx.gfx_level <= GFX9) {
      if (instr->operands.size() >= 2)
         encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
   }
   if (ctx.gfx_level == GFX9) {
      encoding |= soe ? 1 << 14 : 0;
   }

   if (is_load || instr->operands.size() >= 3) { /* SDATA */
      encoding |= (is_load ? reg(ctx, instr->definitions[0]) : reg(ctx, instr->operands[2])) << 6;
   }
   if (instr->operands.size() >= 1) { /* SBASE */
      encoding |= reg(ctx, instr->operands[0]) >> 1;
   }

   out.push_back(encoding);
   encoding = 0;

   int32_t offset = 0;
   uint32_t soffset =
      ctx.gfx_level >= GFX10
         ? reg(ctx, sgpr_null) /* On GFX10 this is disabled by specifying SGPR_NULL */
         : 0;                  /* On GFX9, it is disabled by the SOE bit (and it's not present
                                  on GFX8 and below) */
   if (instr->operands.size() >= 2) {
      const Operand& op_off1 = instr->operands[1];
      if (ctx.gfx_level <= GFX9) {
         offset = op_off1.isConstant() ? op_off1.constantValue() : reg(ctx, op_off1);
      } else {
         /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an
          * SGPR */
         if (op_off1.isConstant()) {
            offset = op_off1.constantValue();
         } else {
            soffset = reg(ctx, op_off1);
            assert(!soe); /* There is no place to put the other SGPR offset, if any */
         }
      }

      if (soe) {
         const Operand& op_off2 = instr->operands.back();
         assert(ctx.gfx_level >= GFX9); /* GFX8 and below don't support specifying a constant
                                           and an SGPR at the same time */
         assert(!op_off2.isConstant());
         soffset = reg(ctx, op_off2);
      }
   }
   encoding |= offset;
   encoding |= soffset << 25;

   out.push_back(encoding);
}

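/* In the compact VOP2/VOP1/VOPC encodings below there are no dedicated opsel
 * bits, so GFX11 true16 ".h" register halves are folded into the 8-bit VGPR
 * fields by adding 128 (the "? 128 : 0" terms).
 */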
void
emit_vop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& valu = instr->valu();

   uint32_t encoding = 0;
   encoding |= opcode << 25;
   encoding |= reg(ctx, instr->definitions[0], 8) << 17;
   encoding |= (valu.opsel[3] ? 128 : 0) << 17;
   encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (valu.opsel[1] ? 128 : 0) << 9;
   encoding |= reg(ctx, instr->operands[0]);
   encoding |= valu.opsel[0] ? 128 : 0;
   out.push_back(encoding);
}

void
emit_vop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& valu = instr->valu();

   uint32_t encoding = (0b0111111 << 25);
   if (!instr->definitions.empty()) {
      encoding |= reg(ctx, instr->definitions[0], 8) << 17;
      encoding |= (valu.opsel[3] ? 128 : 0) << 17;
   }
   encoding |= opcode << 9;
   if (!instr->operands.empty()) {
      encoding |= reg(ctx, instr->operands[0]);
      encoding |= valu.opsel[0] ? 128 : 0;
   }
   out.push_back(encoding);
}

void
emit_vopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& valu = instr->valu();

   uint32_t encoding = (0b0111110 << 25);
   encoding |= opcode << 17;
   encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (valu.opsel[1] ? 128 : 0) << 9;
   encoding |= reg(ctx, instr->operands[0]);
   encoding |= valu.opsel[0] ? 128 : 0;
   out.push_back(encoding);
}

void
emit_vintrp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VINTRP_instruction& interp = instr->vintrp();

   uint32_t encoding = 0;
   if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
       instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_hi_f16) {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110100 << 26);
      } else if (ctx.gfx_level >= GFX10) {
         encoding = (0b110101 << 26);
      } else {
         unreachable("Unknown gfx_level.");
      }

      unsigned opsel = instr->opcode == aco_opcode::v_interp_p2_hi_f16 ? 0x8 : 0;

      encoding |= opcode << 16;
      encoding |= opsel << 11;
      encoding |= reg(ctx, instr->definitions[0], 8);
      out.push_back(encoding);

      encoding = 0;
      encoding |= interp.attribute;
      encoding |= interp.component << 6;
      encoding |= interp.high_16bits << 8;
      encoding |= reg(ctx, instr->operands[0]) << 9;
      if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_hi_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
          instr->opcode == aco_opcode::v_interp_p1lv_f16) {
         encoding |= reg(ctx, instr->operands[2]) << 18;
      }
      out.push_back(encoding);
   } else {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
      } else {
         encoding = (0b110010 << 26);
      }

      assert(encoding);
      encoding |= reg(ctx, instr->definitions[0], 8) << 18;
      encoding |= opcode << 16;
      encoding |= interp.attribute << 10;
      encoding |= interp.component << 8;
      if (instr->opcode == aco_opcode::v_interp_mov_f32)
         encoding |= (0x3 & instr->operands[0].constantValue());
      else
         encoding |= reg(ctx, instr->operands[0], 8);
      out.push_back(encoding);
   }
}

void
emit_vinterp_inreg_instruction(asm_context& ctx, std::vector<uint32_t>& out,
                               const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VINTERP_inreg_instruction& interp = instr->vinterp_inreg();

   uint32_t encoding = (0b11001101 << 24);
   encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= (uint32_t)interp.wait_exp << 8;
   encoding |= (uint32_t)interp.opsel << 11;
   encoding |= (uint32_t)interp.clamp << 15;
   encoding |= opcode << 16;
   out.push_back(encoding);

   encoding = 0;
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   for (unsigned i = 0; i < 3; i++)
      encoding |= interp.neg[i] << (29 + i);
   out.push_back(encoding);
}

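/* VOPD packs two VALU operations (OPX and OPY) into one 64-bit instruction.
 * The ">> 1" on the second destination below matches the encoding storing
 * VDSTY without its low bit; the hardware appears to derive that bit from the
 * encoding, since the two destinations must land in different VGPR banks.
 */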
void
emit_vopd_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VOPD_instruction& vopd = instr->vopd();

   uint32_t encoding = (0b110010 << 26);
   encoding |= reg(ctx, instr->operands[0]);
   if (instr->opcode != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[1], 8) << 9;
   encoding |= (uint32_t)ctx.opcode[(int)vopd.opy] << 17;
   encoding |= opcode << 22;
   out.push_back(encoding);

   unsigned opy_start = get_vopd_opy_start(instr);

   encoding = reg(ctx, instr->operands[opy_start]);
   if (vopd.opy != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[opy_start + 1], 8) << 9;
   encoding |= (reg(ctx, instr->definitions[1], 8) >> 1) << 17;
   encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   out.push_back(encoding);
}

void
emit_ds_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const DS_instruction& ds = instr->ds();

   uint32_t encoding = (0b110110 << 26);
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      encoding |= opcode << 17;
      encoding |= (ds.gds ? 1 : 0) << 16;
   } else {
      encoding |= opcode << 18;
      encoding |= (ds.gds ? 1 : 0) << 17;
   }
   encoding |= ((0xFF & ds.offset1) << 8);
   encoding |= (0xFFFF & ds.offset0);
   out.push_back(encoding);
   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   for (unsigned i = 0; i < MIN2(instr->operands.size(), 3); i++) {
      const Operand& op = instr->operands[i];
      if (op.physReg() != m0 && !op.isUndefined())
         encoding |= reg(ctx, op, 8) << (8 * i);
   }
   out.push_back(encoding);
}

void
emit_ldsdir_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const LDSDIR_instruction& dir = instr->ldsdir();

   uint32_t encoding = (0b11001110 << 24);
   encoding |= opcode << 20;
   encoding |= (uint32_t)dir.wait_vdst << 16;
   if (ctx.gfx_level >= GFX12)
      encoding |= (uint32_t)dir.wait_vsrc << 23;
   encoding |= (uint32_t)dir.attr << 10;
   encoding |= (uint32_t)dir.attr_chan << 8;
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
}

void
emit_mubuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MUBUF_instruction& mubuf = instr->mubuf();
   bool glc = mubuf.cache.value & ac_glc;
   bool slc = mubuf.cache.value & ac_slc;
   bool dlc = mubuf.cache.value & ac_dlc;

   uint32_t encoding = (0b111000 << 26);
   if (ctx.gfx_level >= GFX11 && mubuf.lds) /* GFX11 has separate opcodes for LDS loads */
      opcode = opcode == 0 ? 0x32 : (opcode + 0x1d);
   else
      encoding |= (mubuf.lds ? 1 : 0) << 16;
   encoding |= opcode << 18;
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.idxen ? 1 : 0) << 13;
   assert(!mubuf.addr64 || ctx.gfx_level <= GFX7);
   if (ctx.gfx_level == GFX6 || ctx.gfx_level == GFX7)
      encoding |= (mubuf.addr64 ? 1 : 0) << 15;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.offen ? 1 : 0) << 12;
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      encoding |= (slc ? 1 : 0) << 17;
   } else if (ctx.gfx_level >= GFX11) {
      encoding |= (slc ? 1 : 0) << 12;
      encoding |= (dlc ? 1 : 0) << 13;
   } else if (ctx.gfx_level >= GFX10) {
      encoding |= (dlc ? 1 : 0) << 15;
   }
   encoding |= 0x0FFF & mubuf.offset;
   out.push_back(encoding);
   encoding = 0;
   if (ctx.gfx_level <= GFX7 || (ctx.gfx_level >= GFX10 && ctx.gfx_level <= GFX10_3)) {
      encoding |= (slc ? 1 : 0) << 22;
   }
   encoding |= reg(ctx, instr->operands[2]) << 24;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mubuf.tfe ? 1 : 0) << 21;
      encoding |= (mubuf.offen ? 1 : 0) << 22;
      encoding |= (mubuf.idxen ? 1 : 0) << 23;
   } else {
      encoding |= (mubuf.tfe ? 1 : 0) << 23;
   }
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   if (instr->operands.size() > 3 && !mubuf.lds)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else if (!mubuf.lds)
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8);
   out.push_back(encoding);
}

void
emit_mubuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MUBUF_instruction& mubuf = instr->mubuf();
   assert(!mubuf.lds);

   uint32_t encoding = 0b110001 << 26;
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mubuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mubuf.offen ? 1 : 0) << 30;
   encoding |= (mubuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mubuf) << 18;
   encoding |= 1 << 23;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->operands[1].isUndefined())
      encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mubuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

void
emit_mtbuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MTBUF_instruction& mtbuf = instr->mtbuf();
   bool glc = mtbuf.cache.value & ac_glc;
   bool slc = mtbuf.cache.value & ac_slc;
   bool dlc = mtbuf.cache.value & ac_dlc;
   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);
   assert(img_format <= 0x7F);
   assert(!dlc || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111010 << 26);
   encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
   if (ctx.gfx_level < GFX8) {
      encoding |= opcode << 16;
      /* ADDR64 is unused */
   } else if (ctx.gfx_level >= GFX10 && ctx.gfx_level < GFX11) {
      /* DLC bit replaces one bit of the OPCODE on GFX10 */
      encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
      encoding |= (dlc ? 1 : 0) << 15;
   } else {
      encoding |= opcode << 15;
   }
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (dlc ? 1 : 0) << 13;
      encoding |= (slc ? 1 : 0) << 12;
   } else {
      encoding |= (mtbuf.idxen ? 1 : 0) << 13;
      encoding |= (mtbuf.offen ? 1 : 0) << 12;
   }
   encoding |= 0x0FFF & mtbuf.offset;
   out.push_back(encoding);

   encoding = 0;
   encoding |= reg(ctx, instr->operands[2]) << 24;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mtbuf.idxen ? 1 : 0) << 23;
      encoding |= (mtbuf.offen ? 1 : 0) << 22;
      encoding |= (mtbuf.tfe ? 1 : 0) << 21;
   } else {
      encoding |= (mtbuf.tfe ? 1 : 0) << 23;
      encoding |= (slc ? 1 : 0) << 22;
      if (ctx.gfx_level >= GFX10)
         encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
   }
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8);
   out.push_back(encoding);
}

void
emit_mtbuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MTBUF_instruction& mtbuf = instr->mtbuf();

   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);

   uint32_t encoding = 0b110001 << 26;
   encoding |= 0b1000 << 18;
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mtbuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mtbuf.offen ? 1 : 0) << 30;
   encoding |= (mtbuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mtbuf) << 18;
   encoding |= img_format << 23;
   out.push_back(encoding);

   encoding = 0;
   encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mtbuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

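/* MIMG operand layout in ACO (as consumed below): operands[0] = resource
 * descriptor, operands[1] = sampler, operands[2] = VDATA for stores/atomics,
 * operands[3..] = address. With NSA, addresses after the first are appended
 * as extra dwords holding four 8-bit VGPR indices each.
 */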
void
emit_mimg_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MIMG_instruction& mimg = instr->mimg();
   bool glc = mimg.cache.value & ac_glc;
   bool slc = mimg.cache.value & ac_slc;
   bool dlc = mimg.cache.value & ac_dlc;

   unsigned nsa_dwords = get_mimg_nsa_dwords(instr);
   assert(!nsa_dwords || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111100 << 26);
   if (ctx.gfx_level >= GFX11) { /* GFX11: rearranges most fields */
      assert(nsa_dwords <= 1);
      encoding |= nsa_dwords;
      encoding |= mimg.dim << 2;
      encoding |= mimg.unrm ? 1 << 7 : 0;
      encoding |= (0xF & mimg.dmask) << 8;
      encoding |= slc ? 1 << 12 : 0;
      encoding |= dlc ? 1 << 13 : 0;
      encoding |= glc ? 1 << 14 : 0;
      encoding |= mimg.r128 ? 1 << 15 : 0;
      encoding |= mimg.a16 ? 1 << 16 : 0;
      encoding |= mimg.d16 ? 1 << 17 : 0;
      encoding |= (opcode & 0xFF) << 18;
   } else {
      encoding |= slc ? 1 << 25 : 0;
      encoding |= (opcode & 0x7f) << 18;
      encoding |= (opcode >> 7) & 1;
      encoding |= mimg.lwe ? 1 << 17 : 0;
      encoding |= mimg.tfe ? 1 << 16 : 0;
      encoding |= glc ? 1 << 13 : 0;
      encoding |= mimg.unrm ? 1 << 12 : 0;
      if (ctx.gfx_level <= GFX9) {
         assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
         assert(!mimg.r128);
         encoding |= mimg.a16 ? 1 << 15 : 0;
         encoding |= mimg.da ? 1 << 14 : 0;
      } else {
         /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
         encoding |= mimg.r128 ? 1 << 15 : 0;
         encoding |= nsa_dwords << 1;
         encoding |= mimg.dim << 3; /* GFX10: dimensionality instead of declare array */
         encoding |= dlc ? 1 << 7 : 0;
      }
      encoding |= (0xF & mimg.dmask) << 8;
   }
   out.push_back(encoding);

   encoding = reg(ctx, instr->operands[3], 8); /* VADDR */
   if (!instr->definitions.empty()) {
      encoding |= reg(ctx, instr->definitions[0], 8) << 8; /* VDATA */
   } else if (!instr->operands[2].isUndefined()) {
      encoding |= reg(ctx, instr->operands[2], 8) << 8; /* VDATA */
   }
   encoding |= (0x1F & (reg(ctx, instr->operands[0]) >> 2)) << 16; /* T# (resource) */

   assert(!mimg.d16 || ctx.gfx_level >= GFX9);
   if (ctx.gfx_level >= GFX11) {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 26; /* sampler */

      encoding |= mimg.tfe ? 1 << 21 : 0;
      encoding |= mimg.lwe ? 1 << 22 : 0;
   } else {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 21; /* sampler */

      encoding |= mimg.d16 ? 1 << 31 : 0;
      if (ctx.gfx_level >= GFX10) {
         /* GFX10: A16 still exists, but is in a different place */
         encoding |= mimg.a16 ? 1 << 30 : 0;
      }
   }

   out.push_back(encoding);

   if (nsa_dwords) {
      out.resize(out.size() + nsa_dwords);
      std::vector<uint32_t>::iterator nsa = std::prev(out.end(), nsa_dwords);
      for (unsigned i = 0; i < instr->operands.size() - 4u; i++)
         nsa[i / 4] |= reg(ctx, instr->operands[4 + i], 8) << (i % 4 * 8);
   }
}

void
emit_mimg_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MIMG_instruction& mimg = instr->mimg();

   bool vsample = !instr->operands[1].isUndefined() || instr->opcode == aco_opcode::image_msaa_load;
   uint32_t encoding = opcode << 14;
   if (vsample) {
      encoding |= 0b111001 << 26;
      encoding |= mimg.tfe << 3;
      encoding |= mimg.unrm << 13;
   } else {
      encoding |= 0b110100 << 26;
   }
   encoding |= mimg.dim;
   encoding |= mimg.r128 << 4;
   encoding |= mimg.d16 << 5;
   encoding |= mimg.a16 << 6;
   encoding |= (mimg.dmask & 0xf) << 22;
   out.push_back(encoding);

   uint8_t vaddr[5] = {0, 0, 0, 0, 0};
   for (unsigned i = 3; i < instr->operands.size(); i++)
      vaddr[i - 3] = reg(ctx, instr->operands[i], 8);
   unsigned num_vaddr = instr->operands.size() - 3;
   for (unsigned i = 0; i < MIN2(instr->operands.back().size() - 1, 5 - num_vaddr); i++)
      vaddr[num_vaddr + i] = reg(ctx, instr->operands.back(), 8) + i + 1;

   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8); /* VDATA */
   else if (!instr->operands[2].isUndefined())
      encoding |= reg(ctx, instr->operands[2], 8); /* VDATA */
   encoding |= reg(ctx, instr->operands[0]) << 9; /* T# (resource) */
   if (vsample) {
      encoding |= mimg.lwe << 8;
      if (instr->opcode != aco_opcode::image_msaa_load)
         encoding |= reg(ctx, instr->operands[1]) << 23; /* sampler */
   } else {
      encoding |= mimg.tfe << 23;
      encoding |= vaddr[4] << 24;
   }
   encoding |= get_gfx12_cpol(mimg) << 18;
   out.push_back(encoding);

   encoding = 0;
   for (unsigned i = 0; i < 4; i++)
      encoding |= vaddr[i] << (i * 8);
   out.push_back(encoding);
}

void
emit_flatlike_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const FLAT_instruction& flat = instr->flatlike();
   bool glc = flat.cache.value & ac_glc;
   bool slc = flat.cache.value & ac_slc;
   bool dlc = flat.cache.value & ac_dlc;

   uint32_t encoding = (0b110111 << 26);
   encoding |= opcode << 18;
   if (ctx.gfx_level == GFX9 || ctx.gfx_level >= GFX11) {
      if (instr->isFlat())
         assert(flat.offset <= 0xfff);
      else
         assert(flat.offset >= -4096 && flat.offset < 4096);
      encoding |= flat.offset & 0x1fff;
   } else if (ctx.gfx_level <= GFX8 || instr->isFlat()) {
      /* GFX10 has a 12-bit immediate OFFSET field,
       * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
       */
      assert(flat.offset == 0);
   } else {
      assert(flat.offset >= -2048 && flat.offset <= 2047);
      encoding |= flat.offset & 0xfff;
   }
   if (instr->isScratch())
      encoding |= 1 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   else if (instr->isGlobal())
      encoding |= 2 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   encoding |= flat.lds ? 1 << 13 : 0;
   encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   encoding |= slc ? 1 << (ctx.gfx_level >= GFX11 ? 15 : 17) : 0;
   if (ctx.gfx_level >= GFX10) {
      assert(!flat.nv);
      encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 12) : 0;
   } else {
      assert(!dlc);
   }
   out.push_back(encoding);
   encoding = reg(ctx, instr->operands[0], 8);
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   if (instr->operands.size() >= 3)
      encoding |= reg(ctx, instr->operands[2], 8) << 8;
   if (!instr->operands[1].isUndefined()) {
      assert(ctx.gfx_level >= GFX10 || instr->operands[1].physReg() != 0x7F);
      assert(instr->format != Format::FLAT);
      encoding |= reg(ctx, instr->operands[1], 8) << 16;
   } else if (instr->format != Format::FLAT ||
              ctx.gfx_level >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
      /* For GFX10.3 scratch, 0x7F disables both ADDR and SADDR, unlike sgpr_null, which only
       * disables SADDR. On GFX11, this was replaced with SVE.
       */
      if (ctx.gfx_level <= GFX9 ||
          (instr->isScratch() && instr->operands[0].isUndefined() && ctx.gfx_level < GFX11))
         encoding |= 0x7F << 16;
      else
         encoding |= reg(ctx, sgpr_null) << 16;
   }
   if (ctx.gfx_level >= GFX11 && instr->isScratch())
      encoding |= !instr->operands[0].isUndefined() ? 1 << 23 : 0;
   else
      encoding |= flat.nv ? 1 << 23 : 0;
   out.push_back(encoding);
}

void
emit_flatlike_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out,
                                const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const FLAT_instruction& flat = instr->flatlike();
   assert(!flat.lds);

   uint32_t encoding = opcode << 14;
   encoding |= 0b111011 << 26;
   if (!instr->operands[1].isUndefined()) {
      assert(!instr->isFlat());
      encoding |= reg(ctx, instr->operands[1]);
   } else {
      encoding |= reg(ctx, sgpr_null);
   }
   if (instr->isScratch())
      encoding |= 1 << 24;
   else if (instr->isGlobal())
      encoding |= 2 << 24;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8);
   if (instr->isScratch())
      encoding |= !instr->operands[0].isUndefined() ? 1 << 17 : 0;
   encoding |= get_gfx12_cpol(flat) << 18;
   if (instr->operands.size() >= 3)
      encoding |= reg(ctx, instr->operands[2], 8) << 23;
   out.push_back(encoding);

   encoding = 0;
   if (!instr->operands[0].isUndefined())
      encoding |= reg(ctx, instr->operands[0], 8);
   encoding |= (flat.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}

void
emit_exp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   const Export_instruction& exp = instr->exp();
   uint32_t encoding;
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      encoding = (0b110001 << 26);
   } else {
      encoding = (0b111110 << 26);
   }

   if (ctx.gfx_level >= GFX11) {
      encoding |= exp.row_en ? 0b1 << 13 : 0;
   } else {
      encoding |= exp.valid_mask ? 0b1 << 12 : 0;
      encoding |= exp.compressed ? 0b1 << 10 : 0;
   }
   encoding |= exp.done ? 0b1 << 11 : 0;
   encoding |= exp.dest << 4;
   encoding |= exp.enabled_mask;
   out.push_back(encoding);
   encoding = reg(ctx, exp.operands[0], 8);
   encoding |= reg(ctx, exp.operands[1], 8) << 8;
   encoding |= reg(ctx, exp.operands[2], 8) << 16;
   encoding |= reg(ctx, exp.operands[3], 8) << 24;
   out.push_back(encoding);
}

void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr);

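/* DPP16, DPP8 and SDWA are encoded as a modifier dword appended to the base
 * VALU encoding: the base instruction is first emitted with src0 replaced by
 * a sentinel register (250 for DPP16, 233/234 for DPP8 depending on
 * fetch_inactive, 249 for SDWA, per the code below), then the extra dword
 * carries the real src0 VGPR together with the modifiers.
 */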
void
emit_dpp16_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX8);
   DPP16_instruction& dpp = instr->dpp16();

   /* first emit the instruction without the DPP operand */
   Operand dpp_op = instr->operands[0];
   instr->operands[0] = Operand(PhysReg{250}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP16);
   emit_instruction(ctx, out, instr);
   instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::DPP16);
   instr->operands[0] = dpp_op;

   uint32_t encoding = (0xF & dpp.row_mask) << 28;
   encoding |= (0xF & dpp.bank_mask) << 24;
   encoding |= dpp.abs[1] << 23;
   encoding |= dpp.neg[1] << 22;
   encoding |= dpp.abs[0] << 21;
   encoding |= dpp.neg[0] << 20;
   encoding |= dpp.fetch_inactive << 18;
   encoding |= dpp.bound_ctrl << 19;
   encoding |= dpp.dpp_ctrl << 8;
   encoding |= reg(ctx, dpp_op, 8);
   encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
   out.push_back(encoding);
}

void
emit_dpp8_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX10);
   DPP8_instruction& dpp = instr->dpp8();

   /* first emit the instruction without the DPP operand */
   Operand dpp_op = instr->operands[0];
   instr->operands[0] = Operand(PhysReg{233u + dpp.fetch_inactive}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP8);
   emit_instruction(ctx, out, instr);
   instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::DPP8);
   instr->operands[0] = dpp_op;

   uint32_t encoding = reg(ctx, dpp_op, 8);
   encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
   encoding |= dpp.lane_sel << 8;
   out.push_back(encoding);
}

void
emit_vop3_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& vop3 = instr->valu();

   if (instr->isVOP2()) {
      opcode = opcode + 0x100;
   } else if (instr->isVOP1()) {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9)
         opcode = opcode + 0x140;
      else
         opcode = opcode + 0x180;
   } else if (instr->isVOPC()) {
      opcode = opcode + 0x0;
   } else if (instr->isVINTRP()) {
      opcode = opcode + 0x270;
   }

   uint32_t encoding;
   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110100 << 26);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110101 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   if (ctx.gfx_level <= GFX7) {
      encoding |= opcode << 17;
      encoding |= (vop3.clamp ? 1 : 0) << 11;
   } else {
      encoding |= opcode << 16;
      encoding |= (vop3.clamp ? 1 : 0) << 15;
   }
   encoding |= vop3.opsel << 11;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.abs[i] << (8 + i);
   /* On GFX9 and older, v_cmpx implicitly writes exec besides writing an SGPR pair.
    * On GFX10 and newer, v_cmpx always writes just exec.
    */
   if (instr->definitions.size() == 2 && instr->isVOPC())
      assert(ctx.gfx_level <= GFX9 && instr->definitions[1].physReg() == exec);
   else if (instr->definitions.size() == 2 && instr->opcode != aco_opcode::v_swap_b16)
      encoding |= reg(ctx, instr->definitions[1]) << 8;
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;

   unsigned num_ops = instr->operands.size();
   /* Encoding implicit sources works fine with hardware but breaks some disassemblers. */
   if (instr->opcode == aco_opcode::v_writelane_b32_e64)
      num_ops = 2;
   else if (instr->opcode == aco_opcode::v_swap_b16)
      num_ops = 1;

   for (unsigned i = 0; i < num_ops; i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= vop3.omod << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg[i] << (29 + i);
   out.push_back(encoding);
}

void
emit_vop3p_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& vop3 = instr->valu();

   uint32_t encoding;
   if (ctx.gfx_level == GFX9) {
      encoding = (0b110100111 << 23);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110011 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   encoding |= opcode << 16;
   encoding |= (vop3.clamp ? 1 : 0) << 15;
   encoding |= vop3.opsel_lo << 11;
   encoding |= ((vop3.opsel_hi & 0x4) ? 1 : 0) << 14;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_hi[i] << (8 + i);
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= (vop3.opsel_hi & 0x3) << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_lo[i] << (29 + i);
   out.push_back(encoding);
}

void
emit_sdwa_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   assert(ctx.gfx_level >= GFX8 && ctx.gfx_level < GFX11);
   SDWA_instruction& sdwa = instr->sdwa();

   /* first emit the instruction without the SDWA operand */
   Operand sdwa_op = instr->operands[0];
   instr->operands[0] = Operand(PhysReg{249}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::SDWA);
   emit_instruction(ctx, out, instr);
   instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::SDWA);
   instr->operands[0] = sdwa_op;

   uint32_t encoding = 0;

   if (instr->isVOPC()) {
      if (instr->definitions[0].physReg() !=
          (ctx.gfx_level >= GFX10 && is_cmpx(instr->opcode) ? exec : vcc)) {
         encoding |= reg(ctx, instr->definitions[0]) << 8;
         encoding |= 1 << 15;
      }
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
   } else {
      encoding |= sdwa.dst_sel.to_sdwa_sel(instr->definitions[0].physReg().byte()) << 8;
      uint32_t dst_u = sdwa.dst_sel.sign_extend() ? 1 : 0;
      if (instr->definitions[0].bytes() < 4) /* dst_preserve */
         dst_u = 2;
      encoding |= dst_u << 11;
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
      encoding |= sdwa.omod << 14;
   }

   encoding |= sdwa.sel[0].to_sdwa_sel(sdwa_op.physReg().byte()) << 16;
   encoding |= sdwa.sel[0].sign_extend() ? 1 << 19 : 0;
   encoding |= sdwa.abs[0] << 21;
   encoding |= sdwa.neg[0] << 20;

   if (instr->operands.size() >= 2) {
      encoding |= sdwa.sel[1].to_sdwa_sel(instr->operands[1].physReg().byte()) << 24;
      encoding |= sdwa.sel[1].sign_extend() ? 1 << 27 : 0;
      encoding |= sdwa.abs[1] << 29;
      encoding |= sdwa.neg[1] << 28;
   }

   encoding |= reg(ctx, sdwa_op, 8);
   encoding |= (sdwa_op.physReg() < 256) << 23;
   if (instr->operands.size() >= 2)
      encoding |= (instr->operands[1].physReg() < 256) << 31;
   out.push_back(encoding);
}

void
emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* lower remaining pseudo-instructions */
   if (instr->opcode == aco_opcode::p_constaddr_getpc) {
      ctx.constaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_constaddr_addlo) {
      ctx.constaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_resumeaddr_getpc) {
      ctx.resumeaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_resumeaddr_addlo) {
      ctx.resumeaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_load_symbol) {
      assert(instr->operands[0].isConstant());
      assert(ctx.symbols);

      struct aco_symbol info;
      info.id = (enum aco_symbol_id)instr->operands[0].constantValue();
      info.offset = out.size() + 1;
      ctx.symbols->push_back(info);

      instr->opcode = aco_opcode::s_mov_b32;
      /* in case it's an inline constant, make it a literal */
      instr->operands[0] = Operand::literal32(0);
   } else if (instr->opcode == aco_opcode::p_debug_info) {
      assert(instr->operands[0].isConstant());
      uint32_t index = instr->operands[0].constantValue();
      ctx.program->debug_info[index].offset = (out.size() - 1) * 4;
      return;
   }

   /* Promote VOP12C to VOP3 if necessary. */
   if ((instr->isVOP1() || instr->isVOP2() || instr->isVOPC()) && !instr->isVOP3() &&
       needs_vop3_gfx11(ctx, instr)) {
      instr->format = asVOP3(instr->format);
      if (instr->opcode == aco_opcode::v_fmaak_f16) {
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      } else if (instr->opcode == aco_opcode::v_fmamk_f16) {
         instr->valu().swapOperands(1, 2);
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      }
   }

   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   if (opcode == (uint32_t)-1) {
      char* outmem;
      size_t outsize;
      struct u_memstream mem;
      u_memstream_open(&mem, &outmem, &outsize);
      FILE* const memf = u_memstream_get(&mem);

      fprintf(memf, "Unsupported opcode: ");
      aco_print_instr(ctx.gfx_level, instr, memf);
      u_memstream_close(&mem);

      aco_err(ctx.program, outmem);
      free(outmem);

      abort();
   }

   switch (instr->format) {
   case Format::SOP2: {
      emit_sop2_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPK: {
      emit_sopk_instruction(ctx, out, instr);
      break;
   }
   case Format::SOP1: {
      emit_sop1_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPC: {
      emit_sopc_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPP: {
      emit_sopp_instruction(ctx, out, instr);
      break;
   }
   case Format::SMEM: {
      emit_smem_instruction(ctx, out, instr);
      return;
   }
   case Format::VOP2: {
      emit_vop2_instruction(ctx, out, instr);
      break;
   }
   case Format::VOP1: {
      emit_vop1_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPC: {
      emit_vopc_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTRP: {
      emit_vintrp_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTERP_INREG: {
      emit_vinterp_inreg_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPD: {
      emit_vopd_instruction(ctx, out, instr);
      break;
   }
   case Format::DS: {
      emit_ds_instruction(ctx, out, instr);
      break;
   }
   case Format::LDSDIR: {
      emit_ldsdir_instruction(ctx, out, instr);
      break;
   }
   case Format::MUBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mubuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mubuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MTBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mtbuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mtbuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MIMG: {
      if (ctx.gfx_level >= GFX12)
         emit_mimg_instruction_gfx12(ctx, out, instr);
      else
         emit_mimg_instruction(ctx, out, instr);
      break;
   }
   case Format::FLAT:
   case Format::SCRATCH:
   case Format::GLOBAL: {
      if (ctx.gfx_level >= GFX12)
         emit_flatlike_instruction_gfx12(ctx, out, instr);
      else
         emit_flatlike_instruction(ctx, out, instr);
      break;
   }
   case Format::EXP: {
      emit_exp_instruction(ctx, out, instr);
      break;
   }
   case Format::PSEUDO:
   case Format::PSEUDO_BARRIER:
      if (instr->opcode != aco_opcode::p_unit_test)
         unreachable("Pseudo instructions should be lowered before assembly.");
      break;
   default:
      if (instr->isDPP16()) {
         emit_dpp16_instruction(ctx, out, instr);
         return;
      } else if (instr->isDPP8()) {
         emit_dpp8_instruction(ctx, out, instr);
         return;
      } else if (instr->isVOP3()) {
         emit_vop3_instruction(ctx, out, instr);
      } else if (instr->isVOP3P()) {
         emit_vop3p_instruction(ctx, out, instr);
      } else if (instr->isSDWA()) {
         emit_sdwa_instruction(ctx, out, instr);
      } else {
         unreachable("unimplemented instruction format");
      }
      break;
   }

   /* append literal dword */
   for (const Operand& op : instr->operands) {
      if (op.isLiteral()) {
         out.push_back(op.constantValue());
         break;
      }
   }
}

void
emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
{
   for (aco_ptr<Instruction>& instr : block.instructions) {
#if 0
      int start_idx = out.size();
      std::cerr << "Encoding:\t" << std::endl;
      aco_print_instr(&*instr, stderr);
      std::cerr << std::endl;
#endif
      emit_instruction(ctx, out, instr.get());
#if 0
      for (int i = start_idx; i < out.size(); i++)
         std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
#endif
   }
}

void
fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   bool exported = false;
   for (Block& block : program->blocks) {
      if (!(block.kind & block_kind_export_end))
         continue;
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      while (it != block.instructions.rend()) {
         if ((*it)->isEXP()) {
            Export_instruction& exp = (*it)->exp();
            if (program->stage.hw == AC_HW_VERTEX_SHADER ||
                program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
               if (exp.dest >= V_008DFC_SQ_EXP_POS && exp.dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp.done = true;
                  exported = true;
                  break;
               }
            } else {
               exp.done = true;
               exp.valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec) {
            break;
         }
         ++it;
      }
   }

   /* GFX10+ FS may not export anything if no discard is used. */
   bool may_skip_export = program->stage.hw == AC_HW_PIXEL_SHADER && program->gfx_level >= GFX10;

   if (!exported && !may_skip_export) {
      /* Abort in order to avoid a GPU hang. */
      bool is_vertex_or_ngg = (program->stage.hw == AC_HW_VERTEX_SHADER ||
                               program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER);
      aco_err(program,
              "Missing export in %s shader:", is_vertex_or_ngg ? "vertex or NGG" : "fragment");
      aco_print_program(program, stderr);
      abort();
   }
}

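/* Insert raw dwords into the emitted code and rebase every recorded dword
 * offset that points at or behind the insertion point: block offsets, pending
 * branches, p_constaddr/p_resumeaddr fixups and symbol offsets all index
 * into 'out'.
 */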
static void
insert_code(asm_context& ctx, std::vector<uint32_t>& out, unsigned insert_before,
            unsigned insert_count, const uint32_t* insert_data)
{
   out.insert(out.begin() + insert_before, insert_data, insert_data + insert_count);

   /* Update the offset of each affected block */
   for (Block& block : ctx.program->blocks) {
      if (block.offset >= insert_before)
         block.offset += insert_count;
   }

   /* Update the locations of branches */
   for (branch_info& info : ctx.branches) {
      if (info.pos >= insert_before)
         info.pos += insert_count;
   }

   /* Update the locations of p_constaddr instructions */
   for (auto& constaddr : ctx.constaddrs) {
      constaddr_info& info = constaddr.second;
      if (info.getpc_end >= insert_before)
         info.getpc_end += insert_count;
      if (info.add_literal >= insert_before)
         info.add_literal += insert_count;
   }
   for (auto& constaddr : ctx.resumeaddrs) {
      constaddr_info& info = constaddr.second;
      if (info.getpc_end >= insert_before)
         info.getpc_end += insert_count;
      if (info.add_literal >= insert_before)
         info.add_literal += insert_count;
   }

   if (ctx.symbols) {
      for (auto& symbol : *ctx.symbols) {
         if (symbol.offset >= insert_before)
            symbol.offset += insert_count;
      }
   }
}

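/* A worked example of the check below: SOPP branch offsets are counted in
 * dwords relative to the instruction after the branch, i.e.
 * offset = target_offset - branch_pos - 1. An offset of exactly 0x3f hits the
 * GFX10 bug; inserting one s_nop right after the branch shifts the target a
 * dword further and turns the offset into 0x40.
 */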
static void
fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10,
    * so we work around this by inserting NOPs if needed.
    */
   bool gfx10_3f_bug = false;

   do {
      auto buggy_branch_it = std::find_if(
         ctx.branches.begin(), ctx.branches.end(), [&](const branch_info& branch) -> bool
         { return ((int)ctx.program->blocks[branch.target].offset - branch.pos - 1) == 0x3f; });
      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         insert_code(ctx, out, buggy_branch_it->pos + 1, 1, &s_nop_0);
      }
   } while (gfx10_3f_bug);
}

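/* If a branch target lies further away than the signed 16-bit SOPP
 * displacement allows, rewrite it as a chain: the original branch is
 * redirected to a newly created block holding an unconditional s_branch that
 * covers the remaining distance (possibly chained again on a later pass).
 */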
1526 void
chain_branches(asm_context & ctx,std::vector<uint32_t> & out,branch_info & branch)1527 chain_branches(asm_context& ctx, std::vector<uint32_t>& out, branch_info& branch)
1528 {
1529 /* Create an empty block in order to remember the offset of the chained branch instruction.
1530 * The new branch instructions are inserted into the program in source code order.
1531 */
1532 Block* new_block = ctx.program->create_and_insert_block();
1533 Builder bld(ctx.program);
1534 std::vector<uint32_t> code;
1535 Instruction* branch_instr;
1536
1537 /* Re-direct original branch to new block (offset). */
1538 unsigned target = branch.target;
1539 branch.target = new_block->index;
1540
1541 unsigned skip_branch_target = 0; /* Target of potentially inserted short jump. */
1542
1543 /* Find suitable insertion point:
1544 * We define two offset ranges within our new branch instruction should be placed.
1545 * Then we try to maximize the distance from either the previous branch or the target.
1546 */
   const int half_dist = (INT16_MAX - 31) / 2;
   const unsigned upper_start = MIN2(ctx.program->blocks[target].offset, branch.pos) + half_dist;
   const unsigned upper_end = upper_start + half_dist;
   const unsigned lower_end = MAX2(ctx.program->blocks[target].offset, branch.pos) - half_dist;
   const unsigned lower_start = lower_end - half_dist;
   unsigned insert_at = 0;
   for (unsigned i = 0; i < ctx.program->blocks.size() - 1; i++) {
      Block& block = ctx.program->blocks[i];
      Block& next = ctx.program->blocks[i + 1];
      if (next.offset >= lower_end)
         break;
      if (next.offset < upper_start || (next.offset > upper_end && next.offset < lower_start))
         continue;

      /* If this block ends in an unconditional branch, we can insert
       * another branch right after it without additional cost for the
       * existing code.
       */
      if (!block.instructions.empty() &&
          block.instructions.back()->opcode == aco_opcode::s_branch) {
         insert_at = next.offset;
         bld.reset(&block.instructions);
         if (next.offset >= lower_start)
            break;
      }
   }

   /* If we didn't find a suitable insertion point, split the existing code. */
   if (insert_at == 0) {
      /* Find the last block that is still within reach. */
      unsigned insertion_block_idx = 0;
      while (ctx.program->blocks[insertion_block_idx + 1].offset < upper_end)
         insertion_block_idx++;

      insert_at = ctx.program->blocks[insertion_block_idx].offset;
      auto it = ctx.program->blocks[insertion_block_idx].instructions.begin();
      int skip = 0;
      if (insert_at < upper_start) {
         /* Ensure some forward progress by splitting the block if necessary. */
         while (skip-- > 0 || insert_at < upper_start) {
            Instruction* instr = (it++)->get();
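            /* Keep clauses and delay sequences intact: s_clause and s_delay_alu
             * cover the following instructions, so skip past them instead of
             * splitting them in the middle. */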
            if (instr->isSOPP()) {
               if (instr->opcode == aco_opcode::s_clause)
                  skip = instr->salu().imm + 1;
               else if (instr->opcode == aco_opcode::s_delay_alu)
                  skip = ((instr->salu().imm >> 4) & 0x7) + 1;
               else if (instr->opcode == aco_opcode::s_branch)
                  skip = 1;
               insert_at++;
               continue;
            }
            emit_instruction(ctx, code, instr);
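            /* Re-encoding must reproduce the dwords that were already emitted. */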
            assert(out[insert_at] == code[0]);
            insert_at += code.size();
            code.clear();
         }

         /* If the insertion point is in the middle of the block, insert the branch
          * instructions into that block instead. */
         bld.reset(&ctx.program->blocks[insertion_block_idx].instructions, it);
      } else {
         bld.reset(&ctx.program->blocks[insertion_block_idx - 1].instructions);
         skip_branch_target = insertion_block_idx;
      }

      /* Since we insert a branch into existing code, mitigate LdsBranchVmemWARHazard on GFX10. */
      if (ctx.program->gfx_level == GFX10) {
         emit_sopk_instruction(
            ctx, code, bld.sopk(aco_opcode::s_waitcnt_vscnt, Operand(sgpr_null, s1), 0).instr);
      }

      /* For the existing code, create a short jump over the new branch. */
      branch_instr = bld.sopp(aco_opcode::s_branch, 1).instr;
      emit_sopp_instruction(ctx, code, branch_instr, true);
   }
   const unsigned block_offset = insert_at + code.size();

   branch_instr = bld.sopp(aco_opcode::s_branch, 0);
   emit_sopp_instruction(ctx, code, branch_instr, true);
   insert_code(ctx, out, insert_at, code.size(), code.data());

   new_block->offset = block_offset;
   if (skip_branch_target) {
      /* If we insert a short jump over the new branch at the end of a block,
       * ensure that it gets updated accordingly after additional changes. */
      ctx.branches.push_back({block_offset - 1, skip_branch_target});
   }
   ctx.branches.push_back({block_offset, target});
   assert(out[ctx.branches.back().pos] == code.back());
}

void
fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
{
   bool repeat = false;
   do {
      repeat = false;

      if (ctx.gfx_level == GFX10)
         fix_branches_gfx10(ctx, out);

      for (branch_info& branch : ctx.branches) {
         int offset = (int)ctx.program->blocks[branch.target].offset - branch.pos - 1;
         if (offset >= INT16_MIN && offset <= INT16_MAX) {
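            /* The signed 16-bit offset lives in the low half of the SOPP dword. */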
            out[branch.pos] &= 0xffff0000u;
            out[branch.pos] |= (uint16_t)offset;
         } else {
            chain_branches(ctx, out, branch);
            repeat = true;
            break;
         }
      }
   } while (repeat);
}

void
fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
{
   for (auto& constaddr : ctx.constaddrs) {
      constaddr_info& info = constaddr.second;
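      /* The constant data is appended behind the shader code; add the byte distance
       * from the end of the s_getpc_b64 to the start of the constant data. */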
      out[info.add_literal] += (out.size() - info.getpc_end) * 4u;

      if (ctx.symbols) {
         struct aco_symbol sym;
         sym.id = aco_symbol_const_data_addr;
         sym.offset = info.add_literal;
         ctx.symbols->push_back(sym);
      }
   }
   for (auto& addr : ctx.resumeaddrs) {
      constaddr_info& info = addr.second;
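      /* The literal currently holds the resume block's index; rewrite it as a byte
       * offset from the end of the s_getpc_b64 to the block's start. */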
      const Block& block = ctx.program->blocks[out[info.add_literal]];
      assert(block.kind & block_kind_resume);
      out[info.add_literal] = (block.offset - info.getpc_end) * 4u;
   }
}

void
align_block(asm_context& ctx, std::vector<uint32_t>& code, Block& block)
{
   /* Align the previous loop. */
   if (ctx.loop_header != -1u &&
       block.loop_nest_depth < ctx.program->blocks[ctx.loop_header].loop_nest_depth) {
      assert(ctx.loop_exit != -1u);
      Block& loop_header = ctx.program->blocks[ctx.loop_header];
      Block& loop_exit = ctx.program->blocks[ctx.loop_exit];
      ctx.loop_header = -1u;
      ctx.loop_exit = -1u;
      std::vector<uint32_t> nops;

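      /* Cache lines are 16 dwords (64 bytes). */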
      const unsigned loop_num_cl = DIV_ROUND_UP(block.offset - loop_header.offset, 16);

      /* On GFX10.3+, change the prefetch mode if the loop fits into 2 or 3 cache lines.
       * Don't use the s_inst_prefetch instruction on GFX10 as it might cause hangs.
       */
      const bool change_prefetch = ctx.program->gfx_level >= GFX10_3 &&
                                   ctx.program->gfx_level <= GFX11 && loop_num_cl > 1 &&
                                   loop_num_cl <= 3;

      if (change_prefetch) {
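         /* Emit the prefetch hint in the loop preheader so that it executes once
          * before entering the loop. */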
         Builder bld(ctx.program, &ctx.program->blocks[loop_header.linear_preds[0]]);
         int16_t prefetch_mode = loop_num_cl == 3 ? 0x1 : 0x2;
         Instruction* instr = bld.sopp(aco_opcode::s_inst_prefetch, prefetch_mode);
         emit_instruction(ctx, nops, instr);
         insert_code(ctx, code, loop_header.offset, nops.size(), nops.data());

         /* Change prefetch mode back to default (0x3) at the loop exit. */
         bld.reset(&loop_exit.instructions, loop_exit.instructions.begin());
         instr = bld.sopp(aco_opcode::s_inst_prefetch, 0x3);
         if (loop_exit.index < block.index) {
            /* The loop exit was already emitted, so patch the binary directly. */
            nops.clear();
            emit_instruction(ctx, nops, instr);
            insert_code(ctx, code, loop_exit.offset, nops.size(), nops.data());
         }
      }

      const unsigned loop_start_cl = loop_header.offset >> 4;
      const unsigned loop_end_cl = (block.offset - 1) >> 4;

      /* Align the loop if it fits into the fetched cache lines or if we can
       * reduce the number of cache lines with fewer than 8 NOPs.
       */
      const bool align_loop = loop_end_cl - loop_start_cl >= loop_num_cl &&
                              (loop_num_cl == 1 || change_prefetch || loop_header.offset % 16 > 8);

      if (align_loop) {
         nops.clear();
         nops.resize(16 - (loop_header.offset % 16), 0xbf800000u);
         insert_code(ctx, code, loop_header.offset, nops.size(), nops.data());
      }
   }

   if (block.kind & block_kind_loop_header) {
      /* In case of nested loops, only handle the innermost loop so that
       * aligning an outer loop cannot break the alignment of an inner one.
       * Also ignore loops without a back-edge.
       */
      if (block.linear_preds.size() > 1) {
         ctx.loop_header = block.index;
         ctx.loop_exit = -1u;
      }
   }

   /* Blocks with block_kind_loop_exit might be eliminated after jump threading,
    * so we instead detect loop exits via successors with a smaller loop_nest_depth.
    * This works because control flow always re-converges after loops.
    */
   if (ctx.loop_header != -1u && ctx.loop_exit == -1u) {
      for (uint32_t succ_idx : block.linear_succs) {
         Block& succ = ctx.program->blocks[succ_idx];
         if (succ.loop_nest_depth < ctx.program->blocks[ctx.loop_header].loop_nest_depth)
            ctx.loop_exit = succ_idx;
      }
   }

   /* Align resume shaders to a cache line. */
   if (block.kind & block_kind_resume) {
      size_t cache_aligned = align(code.size(), 16);
      code.resize(cache_aligned, 0xbf800000u); /* s_nop 0 */
      block.offset = code.size();
   }
}

unsigned
emit_program(Program* program, std::vector<uint32_t>& code, std::vector<struct aco_symbol>* symbols,
             bool append_endpgm)
{
   asm_context ctx(program, symbols);

   bool is_separately_compiled_ngg_vs_or_es =
      (program->stage.sw == SWStage::VS || program->stage.sw == SWStage::TES) &&
      program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER &&
      program->info.merged_shader_compiled_separately;

   /* Prologs have no exports. */
   if (!program->is_prolog && !program->info.ps.has_epilog &&
       !is_separately_compiled_ngg_vs_or_es &&
       (program->stage.hw == AC_HW_VERTEX_SHADER || program->stage.hw == AC_HW_PIXEL_SHADER ||
        program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER))
      fix_exports(ctx, code, program);

   for (Block& block : program->blocks) {
      block.offset = code.size();
      align_block(ctx, code, block);
      emit_block(ctx, code, block);
   }

   fix_branches(ctx, code);

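   /* exec_size covers only the shader code itself, excluding the end-of-code
    * markers and the constant data appended below. */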
   unsigned exec_size = code.size() * sizeof(uint32_t);

   /* Add end-of-code markers for the UMR disassembler. */
   if (append_endpgm)
      code.resize(code.size() + 5, 0xbf9f0000u);

   fix_constaddrs(ctx, code);

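   /* Pad the constant data to a dword boundary so it can be copied as uint32_t. */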
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   /* Copy constant data */
   code.insert(code.end(), (uint32_t*)program->constant_data.data(),
               (uint32_t*)(program->constant_data.data() + program->constant_data.size()));

   program->config->scratch_bytes_per_wave =
      align(program->config->scratch_bytes_per_wave, program->dev.scratch_alloc_granule);

   return exec_size;
}

} // namespace aco