1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * SPDX-License-Identifier: MIT
5 */
6
7 #include "aco_builder.h"
8 #include "aco_ir.h"
9
10 #include "common/sid.h"
11
12 #include "util/memstream.h"
13
14 #include "ac_shader_util.h"
15 #include <algorithm>
16 #include <cstdint>
17 #include <map>
18 #include <vector>
19
20 namespace aco {
21
/* Locations (in dwords, within the emitted code stream) used to patch a
 * PC-relative constant-address sequence after assembly.
 * NOTE(review): fields are only declared here; presumably getpc_end is the
 * offset just past the s_getpc and add_literal the literal to fix up —
 * confirm against the fixup code elsewhere in this file.
 */
struct constaddr_info {
   unsigned getpc_end;   /* dword offset where the getpc sequence ends */
   unsigned add_literal; /* dword offset of the literal to be patched */
};
26
/* A branch instruction whose final displacement is not yet known.
 * Recorded while emitting SOPP branches (see emit_sopp_instruction) and
 * resolved in a later fixup pass.
 */
struct branch_info {
   unsigned pos;    /* dword index in the output stream of the branch encoding */
   unsigned target; /* branch target as stored in the instruction's imm field */
};
31
/* Per-shader assembly state: selects the hw-opcode translation table for the
 * target GPU generation and collects fixups (branches, constant addresses)
 * to be resolved after all code is emitted.
 */
struct asm_context {
   Program* program;
   enum amd_gfx_level gfx_level;
   /* Branches whose displacement must be patched once block offsets are known. */
   std::vector<branch_info> branches;
   /* Pending constant-address / resume-address literal fixups, keyed by
    * an unsigned id (presumably a code offset — confirm at the use site). */
   std::map<unsigned, constaddr_info> constaddrs;
   std::map<unsigned, constaddr_info> resumeaddrs;
   std::vector<struct aco_symbol>* symbols;
   uint32_t loop_header = -1u; /* -1u = no loop header currently tracked */
   /* Table mapping aco_opcode -> hardware opcode for the selected generation;
    * -1 entries mean "not available" on that generation. */
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program_, std::vector<struct aco_symbol>* symbols_)
       : program(program_), gfx_level(program->gfx_level), symbols(symbols_)
   {
      /* Each opcode table covers a range of generations (e.g. opcode_gfx9
       * also serves GFX8). */
      if (gfx_level <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (gfx_level <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (gfx_level <= GFX10_3)
         opcode = &instr_info.opcode_gfx10[0];
      else if (gfx_level <= GFX11_5)
         opcode = &instr_info.opcode_gfx11[0];
      else
         opcode = &instr_info.opcode_gfx12[0];
   }

   /* Output position of an open s_subvector_loop_begin, or -1 if none.
    * Used to cross-patch begin/end (see emit_sopk_instruction). */
   int subvector_begin_pos = -1;
};
60
61 unsigned
get_mimg_nsa_dwords(const Instruction * instr)62 get_mimg_nsa_dwords(const Instruction* instr)
63 {
64 unsigned addr_dwords = instr->operands.size() - 3;
65 for (unsigned i = 1; i < addr_dwords; i++) {
66 if (instr->operands[3 + i].physReg() !=
67 instr->operands[3 + (i - 1)].physReg().advance(instr->operands[3 + (i - 1)].bytes()))
68 return DIV_ROUND_UP(addr_dwords - 1, 4);
69 }
70 return 0;
71 }
72
73 unsigned
get_vopd_opy_start(const Instruction * instr)74 get_vopd_opy_start(const Instruction* instr)
75 {
76 switch (instr->opcode) {
77 case aco_opcode::v_dual_fmac_f32:
78 case aco_opcode::v_dual_fmaak_f32:
79 case aco_opcode::v_dual_fmamk_f32:
80 case aco_opcode::v_dual_cndmask_b32:
81 case aco_opcode::v_dual_dot2acc_f32_f16:
82 case aco_opcode::v_dual_dot2acc_f32_bf16: return 3;
83 case aco_opcode::v_dual_mov_b32: return 1;
84 default: return 2;
85 }
86 }
87
88 uint32_t
reg(asm_context & ctx,PhysReg reg)89 reg(asm_context& ctx, PhysReg reg)
90 {
91 if (ctx.gfx_level >= GFX11) {
92 if (reg == m0)
93 return sgpr_null.reg();
94 else if (reg == sgpr_null)
95 return m0.reg();
96 }
97 return reg.reg();
98 }
99
100 ALWAYS_INLINE uint32_t
reg(asm_context & ctx,Operand op,unsigned width=32)101 reg(asm_context& ctx, Operand op, unsigned width = 32)
102 {
103 return reg(ctx, op.physReg()) & BITFIELD_MASK(width);
104 }
105
106 ALWAYS_INLINE uint32_t
reg(asm_context & ctx,Definition def,unsigned width=32)107 reg(asm_context& ctx, Definition def, unsigned width = 32)
108 {
109 return reg(ctx, def.physReg()) & BITFIELD_MASK(width);
110 }
111
/* Whether a GFX11+ true16 instruction must be promoted to the VOP3 encoding.
 * The compact encodings can only address the lower VGPRs; any true16
 * operand/definition (per get_gfx11_true16_mask: bits 0-1 = operands,
 * bit 3 = definition) whose register is at or above 256+128 forces VOP3.
 */
bool
needs_vop3_gfx11(asm_context& ctx, Instruction* instr)
{
   if (ctx.gfx_level <= GFX10_3)
      return false;

   const uint8_t mask = get_gfx11_true16_mask(instr->opcode);
   if (!mask)
      return false;

   const unsigned limit = 256 + 128;
   bool out_of_range = (mask & 0x8) && instr->definitions[0].physReg().reg() >= limit;
   u_foreach_bit (i, mask & 0x3) {
      out_of_range |= instr->operands[i].physReg().reg() >= limit;
   }
   return out_of_range;
}
130
/* Build the GFX12 cache-policy (CPOL) field from an instruction's cache
 * metadata: scope occupies the low two bits, temporal hint the bits above.
 */
template <typename T>
uint32_t
get_gfx12_cpol(const T& instr)
{
   const uint32_t temporal_hint = instr.cache.gfx12.temporal_hint;
   const uint32_t scope = instr.cache.gfx12.scope;
   return scope | (temporal_hint << 2);
}
139
140 void
emit_sop2_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)141 emit_sop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
142 {
143 uint32_t opcode = ctx.opcode[(int)instr->opcode];
144
145 uint32_t encoding = (0b10 << 30);
146 encoding |= opcode << 23;
147 encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
148 encoding |= instr->operands.size() >= 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
149 encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
150 out.push_back(encoding);
151 }
152
/* Encode a SOPK instruction (one dword): [31:28]=0b1011, [27:23]=opcode,
 * [22:16]=sdst, [15:0]=simm16. Also implements the cross-patching of
 * s_subvector_loop_begin/end pairs via ctx.subvector_begin_pos.
 */
void
emit_sopk_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SALU_instruction& sopk = instr->salu();
   assert(sopk.imm <= UINT16_MAX);
   uint16_t imm = sopk.imm;

   if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
      assert(ctx.gfx_level >= GFX10);
      /* Only one subvector loop can be open at a time. */
      assert(ctx.subvector_begin_pos == -1);
      /* Remember where the begin was emitted; its imm is patched when the
       * matching end is seen below. */
      ctx.subvector_begin_pos = out.size();
   } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
      assert(ctx.gfx_level >= GFX10);
      assert(ctx.subvector_begin_pos != -1);
      /* Adjust s_subvector_loop_begin instruction to the address after the end */
      out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
      /* Adjust s_subvector_loop_end instruction to the address after the beginning */
      imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
      ctx.subvector_begin_pos = -1;
   }

   uint32_t encoding = (0b1011 << 28);
   encoding |= opcode << 23;
   /* SDST field: use the definition unless it is only scc; otherwise, for
    * cmpk-style instructions that read an SGPR, use operand 0 (SGPRs encode
    * as 0..127). */
   encoding |= !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc)
                  ? reg(ctx, instr->definitions[0]) << 16
               : !instr->operands.empty() && instr->operands[0].physReg() <= 127
                  ? reg(ctx, instr->operands[0]) << 16
                  : 0;
   encoding |= imm;
   out.push_back(encoding);
}
185
186 void
emit_sop1_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)187 emit_sop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
188 {
189 uint32_t opcode = ctx.opcode[(int)instr->opcode];
190
191 uint32_t encoding = (0b101111101 << 23);
192 encoding |= !instr->definitions.empty() ? reg(ctx, instr->definitions[0]) << 16 : 0;
193 encoding |= opcode << 8;
194 encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
195 out.push_back(encoding);
196 }
197
198 void
emit_sopc_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)199 emit_sopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
200 {
201 uint32_t opcode = ctx.opcode[(int)instr->opcode];
202
203 uint32_t encoding = (0b101111110 << 23);
204 encoding |= opcode << 16;
205 encoding |= instr->operands.size() == 2 ? reg(ctx, instr->operands[1]) << 8 : 0;
206 encoding |= !instr->operands.empty() ? reg(ctx, instr->operands[0]) : 0;
207 out.push_back(encoding);
208 }
209
/* Encode a SOPP instruction (one dword):
 * [31:23]=0b101111111, [22:16]=opcode, [15:0]=simm16.
 * Branches are emitted with simm16 = 0 and recorded in ctx.branches
 * (position + target from sopp.imm) so the displacement can be patched
 * once final code offsets are known; force_imm bypasses this and encodes
 * the immediate directly.
 */
void
emit_sopp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr,
                      bool force_imm = false)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SALU_instruction& sopp = instr->salu();

   uint32_t encoding = (0b101111111 << 23);
   encoding |= opcode << 16;

   if (!force_imm && instr_info.classes[(int)instr->opcode] == instr_class::branch) {
      /* Defer: the 16-bit displacement is filled in by the branch fixup pass. */
      ctx.branches.push_back({(unsigned)out.size(), sopp.imm});
   } else {
      assert(sopp.imm <= UINT16_MAX);
      encoding |= (uint16_t)sopp.imm;
   }
   out.push_back(encoding);
}
228
/* Encode an SMEM (scalar memory) instruction. The layout differs per
 * generation: GFX7 and below use the one/two-dword SMRD format; GFX8+ use
 * the two-dword SMEM format with generation-specific positions for the
 * GLC/DLC/SOE bits and for the OFFSET/SOFFSET fields.
 */
void
emit_smem_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const SMEM_instruction& smem = instr->smem();
   bool glc = smem.cache.value & ac_glc;
   bool dlc = smem.cache.value & ac_dlc;

   /* SOE (second SGPR offset present): loads have operands
    * (sbase, offset[, soffset]); stores additionally carry sdata, hence the
    * different operand-count thresholds. */
   bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
   bool is_load = !instr->definitions.empty();
   uint32_t encoding = 0;

   if (ctx.gfx_level <= GFX7) {
      /* SMRD: [31:27]=0b11000, [26:22]=opcode, [21:15]=sdst,
       * [14:9]=sbase (pair index), [8]=imm, [7:0]=offset. */
      encoding = (0b11000 << 27);
      encoding |= opcode << 22;
      encoding |= instr->definitions.size() ? reg(ctx, instr->definitions[0]) << 15 : 0;
      encoding |= instr->operands.size() ? (reg(ctx, instr->operands[0]) >> 1) << 9 : 0;
      if (instr->operands.size() >= 2) {
         if (!instr->operands[1].isConstant()) {
            /* SGPR offset. */
            encoding |= reg(ctx, instr->operands[1]);
         } else if (instr->operands[1].constantValue() >= 1024) {
            /* Constant too large for the 8-bit dword-offset field. */
            encoding |= 255; /* SQ_SRC_LITERAL */
         } else {
            /* Byte offset encoded as a dword offset with the IMM bit set. */
            encoding |= instr->operands[1].constantValue() >> 2;
            encoding |= 1 << 8;
         }
      }
      out.push_back(encoding);
      /* SMRD instructions can take a literal on GFX7 */
      if (instr->operands.size() >= 2 && instr->operands[1].isConstant() &&
          instr->operands[1].constantValue() >= 1024)
         out.push_back(instr->operands[1].constantValue() >> 2);
      return;
   }

   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110000 << 26);
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      /* We don't use the NV bit. */
   } else {
      encoding = (0b111101 << 26);
      /* DLC bit moved between GFX10 (bit 14) and GFX11 (bit 13); gone on GFX12. */
      if (ctx.gfx_level <= GFX11_5)
         encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 14) : 0;
   }

   if (ctx.gfx_level <= GFX11_5) {
      encoding |= opcode << 18;
      encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   } else {
      /* GFX12: opcode moves down; cache policy is the unified CPOL field. */
      encoding |= opcode << 13;
      encoding |= get_gfx12_cpol(smem) << 21;
   }

   if (ctx.gfx_level <= GFX9) {
      if (instr->operands.size() >= 2)
         encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
   }
   if (ctx.gfx_level == GFX9) {
      encoding |= soe ? 1 << 14 : 0;
   }

   if (is_load || instr->operands.size() >= 3) { /* SDATA */
      encoding |= (is_load ? reg(ctx, instr->definitions[0]) : reg(ctx, instr->operands[2])) << 6;
   }
   if (instr->operands.size() >= 1) { /* SBASE */
      /* SBASE is encoded as an SGPR-pair index, hence the >> 1. */
      encoding |= reg(ctx, instr->operands[0]) >> 1;
   }

   out.push_back(encoding);
   encoding = 0;

   int32_t offset = 0;
   uint32_t soffset =
      ctx.gfx_level >= GFX10
         ? reg(ctx, sgpr_null) /* On GFX10 this is disabled by specifying SGPR_NULL */
         : 0;                  /* On GFX9, it is disabled by the SOE bit (and it's not present on
                                  GFX8 and below) */
   if (instr->operands.size() >= 2) {
      const Operand& op_off1 = instr->operands[1];
      if (ctx.gfx_level <= GFX9) {
         offset = op_off1.isConstant() ? op_off1.constantValue() : reg(ctx, op_off1);
      } else {
         /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an
          * SGPR */
         if (op_off1.isConstant()) {
            offset = op_off1.constantValue();
         } else {
            soffset = reg(ctx, op_off1);
            assert(!soe); /* There is no place to put the other SGPR offset, if any */
         }
      }

      if (soe) {
         const Operand& op_off2 = instr->operands.back();
         assert(ctx.gfx_level >= GFX9); /* GFX8 and below don't support specifying a constant
                                           and an SGPR at the same time */
         assert(!op_off2.isConstant());
         soffset = reg(ctx, op_off2);
      }
   }
   encoding |= offset;
   encoding |= soffset << 25;

   out.push_back(encoding);
}
334
335 void
emit_vop2_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)336 emit_vop2_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
337 {
338 uint32_t opcode = ctx.opcode[(int)instr->opcode];
339 const VALU_instruction& valu = instr->valu();
340
341 uint32_t encoding = 0;
342 encoding |= opcode << 25;
343 encoding |= reg(ctx, instr->definitions[0], 8) << 17;
344 encoding |= (valu.opsel[3] ? 128 : 0) << 17;
345 encoding |= reg(ctx, instr->operands[1], 8) << 9;
346 encoding |= (valu.opsel[1] ? 128 : 0) << 9;
347 encoding |= reg(ctx, instr->operands[0]);
348 encoding |= valu.opsel[0] ? 128 : 0;
349 out.push_back(encoding);
350 }
351
352 void
emit_vop1_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)353 emit_vop1_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
354 {
355 uint32_t opcode = ctx.opcode[(int)instr->opcode];
356 const VALU_instruction& valu = instr->valu();
357
358 uint32_t encoding = (0b0111111 << 25);
359 if (!instr->definitions.empty()) {
360 encoding |= reg(ctx, instr->definitions[0], 8) << 17;
361 encoding |= (valu.opsel[3] ? 128 : 0) << 17;
362 }
363 encoding |= opcode << 9;
364 if (!instr->operands.empty()) {
365 encoding |= reg(ctx, instr->operands[0]);
366 encoding |= valu.opsel[0] ? 128 : 0;
367 }
368 out.push_back(encoding);
369 }
370
371 void
emit_vopc_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)372 emit_vopc_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
373 {
374 uint32_t opcode = ctx.opcode[(int)instr->opcode];
375 const VALU_instruction& valu = instr->valu();
376
377 uint32_t encoding = (0b0111110 << 25);
378 encoding |= opcode << 17;
379 encoding |= reg(ctx, instr->operands[1], 8) << 9;
380 encoding |= (valu.opsel[1] ? 128 : 0) << 9;
381 encoding |= reg(ctx, instr->operands[0]);
382 encoding |= valu.opsel[0] ? 128 : 0;
383 out.push_back(encoding);
384 }
385
/* Encode a VINTRP instruction. The 16-bit interpolation opcodes use the
 * two-dword VOP3-style encoding (top-word prefix differs between GFX8/9 and
 * GFX10+); everything else uses the single-dword legacy VINTRP format.
 */
void
emit_vintrp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VINTRP_instruction& interp = instr->vintrp();

   uint32_t encoding = 0;
   if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
       instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_f16 ||
       instr->opcode == aco_opcode::v_interp_p2_hi_f16) {
      /* Two-dword encoding; the prefix changed between generations. */
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110100 << 26);
      } else if (ctx.gfx_level >= GFX10) {
         encoding = (0b110101 << 26);
      } else {
         unreachable("Unknown gfx_level.");
      }

      /* p2_hi writes the high 16 bits of the destination via OPSEL. */
      unsigned opsel = instr->opcode == aco_opcode::v_interp_p2_hi_f16 ? 0x8 : 0;

      encoding |= opcode << 16;
      encoding |= opsel << 11;
      encoding |= reg(ctx, instr->definitions[0], 8);
      out.push_back(encoding);

      /* Second dword: attribute/component selection plus source registers. */
      encoding = 0;
      encoding |= interp.attribute;
      encoding |= interp.component << 6;
      encoding |= interp.high_16bits << 8;
      encoding |= reg(ctx, instr->operands[0]) << 9;
      if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_hi_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
          instr->opcode == aco_opcode::v_interp_p1lv_f16) {
         /* These take a third source (the p1 result / accumulator). */
         encoding |= reg(ctx, instr->operands[2]) << 18;
      }
      out.push_back(encoding);
   } else {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
         encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
      } else {
         encoding = (0b110010 << 26);
      }

      assert(encoding);
      encoding |= reg(ctx, instr->definitions[0], 8) << 18;
      encoding |= opcode << 16;
      encoding |= interp.attribute << 10;
      encoding |= interp.component << 8;
      if (instr->opcode == aco_opcode::v_interp_mov_f32)
         /* v_interp_mov_f32 encodes the P0/P10/P20 selector, not a register. */
         encoding |= (0x3 & instr->operands[0].constantValue());
      else
         encoding |= reg(ctx, instr->operands[0], 8);
      out.push_back(encoding);
   }
}
444
/* Encode a VINTERP (in-register interpolation, GFX11+) instruction:
 * two dwords. First dword: [31:24]=0b11001101, [22:16]=opcode, [15]=clamp,
 * [13:11]=opsel, [10:8]=wait_exp, [7:0]=vdst. Second dword: three 9-bit
 * source fields plus per-source negate bits at [31:29].
 */
void
emit_vinterp_inreg_instruction(asm_context& ctx, std::vector<uint32_t>& out,
                               const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VINTERP_inreg_instruction& interp = instr->vinterp_inreg();

   uint32_t encoding = (0b11001101 << 24);
   encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= (uint32_t)interp.wait_exp << 8;
   encoding |= (uint32_t)interp.opsel << 11;
   encoding |= (uint32_t)interp.clamp << 15;
   encoding |= opcode << 16;
   out.push_back(encoding);

   /* Sources are packed as consecutive 9-bit fields. */
   encoding = 0;
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   for (unsigned i = 0; i < 3; i++)
      encoding |= interp.neg[i] << (29 + i);
   out.push_back(encoding);
}
467
/* Encode a VOPD (dual-issue, GFX11+) instruction: two dwords.
 * First dword carries the OPX half (src0x, vsrc1x) and both opcodes;
 * second dword carries the OPY half plus both destinations. v_dual_mov_b32
 * has no vsrc1, so that field stays zero for it.
 */
void
emit_vopd_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VOPD_instruction& vopd = instr->vopd();

   uint32_t encoding = (0b110010 << 26);
   encoding |= reg(ctx, instr->operands[0]);
   if (instr->opcode != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[1], 8) << 9;
   /* OPY's hardware opcode is looked up separately from vopd.opy. */
   encoding |= (uint32_t)ctx.opcode[(int)vopd.opy] << 17;
   encoding |= opcode << 22;
   out.push_back(encoding);

   /* OPY's operands start after OPX's (count depends on the OPX opcode). */
   unsigned opy_start = get_vopd_opy_start(instr);

   encoding = reg(ctx, instr->operands[opy_start]);
   if (vopd.opy != aco_opcode::v_dual_mov_b32)
      encoding |= reg(ctx, instr->operands[opy_start + 1], 8) << 9;
   /* vdsty is stored without its LSB (the hw derives it from vdstx parity). */
   encoding |= (reg(ctx, instr->definitions[1], 8) >> 1) << 17;
   encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   out.push_back(encoding);
}
491
/* Encode a DS (LDS/GDS) instruction: two dwords. The opcode/GDS-bit
 * positions moved by one between GFX8/9 and GFX10+. The second dword packs
 * vdst plus up to three 8-bit VGPR operand fields (addr, data0, data1);
 * m0 and undefined operands leave their field zero.
 */
void
emit_ds_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const DS_instruction& ds = instr->ds();

   uint32_t encoding = (0b110110 << 26);
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      encoding |= opcode << 17;
      encoding |= (ds.gds ? 1 : 0) << 16;
   } else {
      encoding |= opcode << 18;
      encoding |= (ds.gds ? 1 : 0) << 17;
   }
   encoding |= ((0xFF & ds.offset1) << 8);
   encoding |= (0xFFFF & ds.offset0);
   out.push_back(encoding);
   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   for (unsigned i = 0; i < MIN2(instr->operands.size(), 3); i++) {
      const Operand& op = instr->operands[i];
      /* m0 (the GDS base/size) is implicit and not encoded. */
      if (op.physReg() != m0 && !op.isUndefined())
         encoding |= reg(ctx, op, 8) << (8 * i);
   }
   out.push_back(encoding);
}
519
520 void
emit_ldsdir_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)521 emit_ldsdir_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
522 {
523 uint32_t opcode = ctx.opcode[(int)instr->opcode];
524 const LDSDIR_instruction& dir = instr->ldsdir();
525
526 uint32_t encoding = (0b11001110 << 24);
527 encoding |= opcode << 20;
528 encoding |= (uint32_t)dir.wait_vdst << 16;
529 if (ctx.gfx_level >= GFX12)
530 encoding |= (uint32_t)dir.wait_vsrc << 23;
531 encoding |= (uint32_t)dir.attr << 10;
532 encoding |= (uint32_t)dir.attr_chan << 8;
533 encoding |= reg(ctx, instr->definitions[0], 8);
534 out.push_back(encoding);
535 }
536
/* Encode a MUBUF (untyped buffer) instruction for GFX6-GFX11.5: two dwords.
 * Bit positions of IDXEN/OFFEN/GLC/SLC/DLC/TFE and the LDS bit vary per
 * generation; GFX12 uses a separate encoder (emit_mubuf_instruction_gfx12).
 * Operands: [0]=resource (SRD), [1]=vaddr, [2]=soffset, [3]=vdata (stores).
 */
void
emit_mubuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MUBUF_instruction& mubuf = instr->mubuf();
   bool glc = mubuf.cache.value & ac_glc;
   bool slc = mubuf.cache.value & ac_slc;
   bool dlc = mubuf.cache.value & ac_dlc;

   uint32_t encoding = (0b111000 << 26);
   if (ctx.gfx_level >= GFX11 && mubuf.lds) /* GFX11 has separate opcodes for LDS loads */
      opcode = opcode == 0 ? 0x32 : (opcode + 0x1d);
   else
      encoding |= (mubuf.lds ? 1 : 0) << 16;
   encoding |= opcode << 18;
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.idxen ? 1 : 0) << 13;
   assert(!mubuf.addr64 || ctx.gfx_level <= GFX7);
   if (ctx.gfx_level == GFX6 || ctx.gfx_level == GFX7)
      encoding |= (mubuf.addr64 ? 1 : 0) << 15;
   if (ctx.gfx_level <= GFX10_3)
      encoding |= (mubuf.offen ? 1 : 0) << 12;
   if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
      assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
      encoding |= (slc ? 1 : 0) << 17;
   } else if (ctx.gfx_level >= GFX11) {
      encoding |= (slc ? 1 : 0) << 12;
      encoding |= (dlc ? 1 : 0) << 13;
   } else if (ctx.gfx_level >= GFX10) {
      encoding |= (dlc ? 1 : 0) << 15;
   }
   encoding |= 0x0FFF & mubuf.offset;
   out.push_back(encoding);
   encoding = 0;
   /* On GFX6-7 and GFX10-10.3 the SLC bit lives in the second dword. */
   if (ctx.gfx_level <= GFX7 || (ctx.gfx_level >= GFX10 && ctx.gfx_level <= GFX10_3)) {
      encoding |= (slc ? 1 : 0) << 22;
   }
   encoding |= reg(ctx, instr->operands[2]) << 24; /* SOFFSET */
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mubuf.tfe ? 1 : 0) << 21;
      encoding |= (mubuf.offen ? 1 : 0) << 22;
      encoding |= (mubuf.idxen ? 1 : 0) << 23;
   } else {
      encoding |= (mubuf.tfe ? 1 : 0) << 23;
   }
   /* SRSRC is encoded in units of 4 SGPRs. */
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   /* VDATA: store source when present, else the load destination; LDS loads
    * bypass VGPRs entirely. */
   if (instr->operands.size() > 3 && !mubuf.lds)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else if (!mubuf.lds)
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8); /* VADDR */
   out.push_back(encoding);
}
591
/* Encode a MUBUF instruction in the GFX12 VBUFFER format: three dwords.
 * LDS-targeting buffer ops no longer exist on GFX12. A constant-zero
 * soffset is encoded as SGPR_NULL.
 */
void
emit_mubuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MUBUF_instruction& mubuf = instr->mubuf();
   assert(!mubuf.lds);

   uint32_t encoding = 0b110001 << 26;
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mubuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   /* Second dword: VDATA (store source or load destination), SRSRC,
    * addressing-mode bits and the cache policy. */
   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mubuf.offen ? 1 : 0) << 30;
   encoding |= (mubuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mubuf) << 18;
   encoding |= 1 << 23;
   out.push_back(encoding);

   /* Third dword: VADDR plus the 24-bit offset. */
   encoding = 0;
   if (!instr->operands[1].isUndefined())
      encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mubuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}
628
/* Encode an MTBUF (typed buffer) instruction for GFX6-GFX11.5: two dwords.
 * The FORMAT field unifies the old NFMT+DFMT pair on GFX10+; on GFX10 the
 * 4-bit opcode is split, with its MSB landing in the second dword.
 * Operands: [0]=resource (SRD), [1]=vaddr, [2]=soffset, [3]=vdata (stores).
 */
void
emit_mtbuf_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MTBUF_instruction& mtbuf = instr->mtbuf();
   bool glc = mtbuf.cache.value & ac_glc;
   bool slc = mtbuf.cache.value & ac_slc;
   bool dlc = mtbuf.cache.value & ac_dlc;
   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);
   assert(img_format <= 0x7F);
   assert(!dlc || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111010 << 26);
   encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
   if (ctx.gfx_level < GFX8) {
      encoding |= opcode << 16;
      /* ADDR64 is unused */
   } else if (ctx.gfx_level >= GFX10 && ctx.gfx_level < GFX11) {
      /* DLC bit replaces one bit of the OPCODE on GFX10 */
      encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
      encoding |= (dlc ? 1 : 0) << 15;
   } else {
      encoding |= opcode << 15;
   }
   encoding |= (glc ? 1 : 0) << 14;
   if (ctx.gfx_level >= GFX11) {
      encoding |= (dlc ? 1 : 0) << 13;
      encoding |= (slc ? 1 : 0) << 12;
   } else {
      encoding |= (mtbuf.idxen ? 1 : 0) << 13;
      encoding |= (mtbuf.offen ? 1 : 0) << 12;
   }
   encoding |= 0x0FFF & mtbuf.offset;
   out.push_back(encoding);

   encoding = 0;
   encoding |= reg(ctx, instr->operands[2]) << 24; /* SOFFSET */
   if (ctx.gfx_level >= GFX11) {
      encoding |= (mtbuf.idxen ? 1 : 0) << 23;
      encoding |= (mtbuf.offen ? 1 : 0) << 22;
      encoding |= (mtbuf.tfe ? 1 : 0) << 21;
   } else {
      encoding |= (mtbuf.tfe ? 1 : 0) << 23;
      encoding |= (slc ? 1 : 0) << 22;
      if (ctx.gfx_level >= GFX10)
         encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
   }
   /* SRSRC is encoded in units of 4 SGPRs. */
   encoding |= (reg(ctx, instr->operands[0]) >> 2) << 16;
   /* VDATA: store source when present, else the load destination. */
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8) << 8;
   else
      encoding |= reg(ctx, instr->definitions[0], 8) << 8;
   encoding |= reg(ctx, instr->operands[1], 8); /* VADDR */
   out.push_back(encoding);
}
684
/* Encode an MTBUF instruction in the GFX12 VBUFFER format: three dwords.
 * Same layout as the GFX12 MUBUF encoding except for the 0b1000 marker in
 * the first dword and the FORMAT field replacing the constant in the second.
 * A constant-zero soffset is encoded as SGPR_NULL.
 */
void
emit_mtbuf_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MTBUF_instruction& mtbuf = instr->mtbuf();

   uint32_t img_format = ac_get_tbuffer_format(ctx.gfx_level, mtbuf.dfmt, mtbuf.nfmt);

   uint32_t encoding = 0b110001 << 26;
   encoding |= 0b1000 << 18; /* distinguishes typed from untyped buffer ops */
   encoding |= opcode << 14;
   if (instr->operands[2].isConstant()) {
      assert(instr->operands[2].constantValue() == 0);
      encoding |= reg(ctx, sgpr_null);
   } else {
      encoding |= reg(ctx, instr->operands[2]);
   }
   encoding |= (mtbuf.tfe ? 1 : 0) << 22;
   out.push_back(encoding);

   /* Second dword: VDATA (store source or load destination), SRSRC,
    * addressing-mode bits, cache policy and the buffer FORMAT. */
   encoding = 0;
   if (instr->operands.size() > 3)
      encoding |= reg(ctx, instr->operands[3], 8);
   else
      encoding |= reg(ctx, instr->definitions[0], 8);
   encoding |= reg(ctx, instr->operands[0]) << 9;
   encoding |= (mtbuf.offen ? 1 : 0) << 30;
   encoding |= (mtbuf.idxen ? 1 : 0) << 31;
   encoding |= get_gfx12_cpol(mtbuf) << 18;
   encoding |= img_format << 23;
   out.push_back(encoding);

   /* Third dword: VADDR plus the 24-bit offset. */
   encoding = 0;
   encoding |= reg(ctx, instr->operands[1], 8);
   encoding |= (mtbuf.offset & 0x00ffffff) << 8;
   out.push_back(encoding);
}
722
/* Encode a MIMG (image) instruction for pre-GFX12: two dwords plus optional
 * NSA dwords. GFX11 rearranges most fields of the first dword. Operands:
 * [0]=resource (T#), [1]=sampler (S#), [2]=vdata (stores), [3..]=vaddr.
 */
void
emit_mimg_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MIMG_instruction& mimg = instr->mimg();
   bool glc = mimg.cache.value & ac_glc;
   bool slc = mimg.cache.value & ac_slc;
   bool dlc = mimg.cache.value & ac_dlc;

   unsigned nsa_dwords = get_mimg_nsa_dwords(instr);
   assert(!nsa_dwords || ctx.gfx_level >= GFX10);

   uint32_t encoding = (0b111100 << 26);
   if (ctx.gfx_level >= GFX11) { /* GFX11: rearranges most fields */
      assert(nsa_dwords <= 1);
      encoding |= nsa_dwords;
      encoding |= mimg.dim << 2;
      encoding |= mimg.unrm ? 1 << 7 : 0;
      encoding |= (0xF & mimg.dmask) << 8;
      encoding |= slc ? 1 << 12 : 0;
      encoding |= dlc ? 1 << 13 : 0;
      encoding |= glc ? 1 << 14 : 0;
      encoding |= mimg.r128 ? 1 << 15 : 0;
      encoding |= mimg.a16 ? 1 << 16 : 0;
      encoding |= mimg.d16 ? 1 << 17 : 0;
      encoding |= (opcode & 0xFF) << 18;
   } else {
      encoding |= slc ? 1 << 25 : 0;
      /* The 8-bit opcode is split: low 7 bits at [24:18], MSB at bit 0. */
      encoding |= (opcode & 0x7f) << 18;
      encoding |= (opcode >> 7) & 1;
      encoding |= mimg.lwe ? 1 << 17 : 0;
      encoding |= mimg.tfe ? 1 << 16 : 0;
      encoding |= glc ? 1 << 13 : 0;
      encoding |= mimg.unrm ? 1 << 12 : 0;
      if (ctx.gfx_level <= GFX9) {
         assert(!dlc); /* Device-level coherent is not supported on GFX9 and lower */
         assert(!mimg.r128);
         encoding |= mimg.a16 ? 1 << 15 : 0;
         encoding |= mimg.da ? 1 << 14 : 0;
      } else {
         encoding |= mimg.r128 ? 1 << 15
                               : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
         encoding |= nsa_dwords << 1;
         encoding |= mimg.dim << 3; /* GFX10: dimensionality instead of declare array */
         encoding |= dlc ? 1 << 7 : 0;
      }
      encoding |= (0xF & mimg.dmask) << 8;
   }
   out.push_back(encoding);

   encoding = reg(ctx, instr->operands[3], 8); /* VADDR */
   if (!instr->definitions.empty()) {
      encoding |= reg(ctx, instr->definitions[0], 8) << 8; /* VDATA */
   } else if (!instr->operands[2].isUndefined()) {
      encoding |= reg(ctx, instr->operands[2], 8) << 8; /* VDATA */
   }
   /* T# is encoded in units of 4 SGPRs. */
   encoding |= (0x1F & (reg(ctx, instr->operands[0]) >> 2)) << 16; /* T# (resource) */

   assert(!mimg.d16 || ctx.gfx_level >= GFX9);
   if (ctx.gfx_level >= GFX11) {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 26; /* sampler */

      encoding |= mimg.tfe ? 1 << 21 : 0;
      encoding |= mimg.lwe ? 1 << 22 : 0;
   } else {
      if (!instr->operands[1].isUndefined())
         encoding |= (0x1F & (reg(ctx, instr->operands[1]) >> 2)) << 21; /* sampler */

      encoding |= mimg.d16 ? 1 << 31 : 0;
      if (ctx.gfx_level >= GFX10) {
         /* GFX10: A16 still exists, but is in a different place */
         encoding |= mimg.a16 ? 1 << 30 : 0;
      }
   }

   out.push_back(encoding);

   /* Non-sequential addresses: the extra vaddr registers (operands[4..]) are
    * packed four-per-dword in trailing NSA dwords. */
   if (nsa_dwords) {
      out.resize(out.size() + nsa_dwords);
      std::vector<uint32_t>::iterator nsa = std::prev(out.end(), nsa_dwords);
      for (unsigned i = 0; i < instr->operands.size() - 4u; i++)
         nsa[i / 4] |= reg(ctx, instr->operands[4 + i], 8) << (i % 4 * 8);
   }
}
808
/* Encode a MIMG instruction in the GFX12 format: three dwords. Image ops
 * split into VSAMPLE (sampler present, and image_msaa_load) and VIMAGE
 * encodings with different prefixes and field placements. Operands:
 * [0]=resource, [1]=sampler, [2]=vdata (stores), [3..]=vaddr.
 */
void
emit_mimg_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const MIMG_instruction& mimg = instr->mimg();

   bool vsample = !instr->operands[1].isUndefined() || instr->opcode == aco_opcode::image_msaa_load;
   uint32_t encoding = opcode << 14;
   if (vsample) {
      encoding |= 0b111001 << 26;
      encoding |= mimg.tfe << 3;
      encoding |= mimg.unrm << 13;
   } else {
      encoding |= 0b110100 << 26;
   }
   encoding |= mimg.dim;
   encoding |= mimg.r128 << 4;
   encoding |= mimg.d16 << 5;
   encoding |= mimg.a16 << 6;
   encoding |= (mimg.dmask & 0xf) << 22;
   out.push_back(encoding);

   /* Collect up to 5 vaddr register fields: the explicit address operands,
    * then the remaining registers of a multi-dword last operand (its extra
    * dwords continue from its base register). */
   uint8_t vaddr[5] = {0, 0, 0, 0, 0};
   for (unsigned i = 3; i < instr->operands.size(); i++)
      vaddr[i - 3] = reg(ctx, instr->operands[i], 8);
   unsigned num_vaddr = instr->operands.size() - 3;
   for (unsigned i = 0; i < MIN2(instr->operands.back().size() - 1, 5 - num_vaddr); i++)
      vaddr[num_vaddr + i] = reg(ctx, instr->operands.back(), 8) + i + 1;

   encoding = 0;
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8); /* VDATA */
   else if (!instr->operands[2].isUndefined())
      encoding |= reg(ctx, instr->operands[2], 8); /* VDATA */
   encoding |= reg(ctx, instr->operands[0]) << 9; /* T# (resource) */
   if (vsample) {
      encoding |= mimg.lwe << 8;
      if (instr->opcode != aco_opcode::image_msaa_load)
         encoding |= reg(ctx, instr->operands[1]) << 23; /* sampler */
   } else {
      /* VIMAGE: TFE moves here, and the fifth vaddr byte is packed here. */
      encoding |= mimg.tfe << 23;
      encoding |= vaddr[4] << 24;
   }
   encoding |= get_gfx12_cpol(mimg) << 18;
   out.push_back(encoding);

   /* Third dword: first four vaddr bytes. */
   encoding = 0;
   for (unsigned i = 0; i < 4; i++)
      encoding |= vaddr[i] << (i * 8);
   out.push_back(encoding);
}
860
void
emit_flatlike_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   /* Encode a FLAT/GLOBAL/SCRATCH instruction (pre-GFX12) as two dwords.
    * Operand layout (from the field placement below): [0] = address VGPR,
    * [1] = scalar address (SADDR, may be undefined), [2] = data VGPR;
    * definition [0] = destination VGPR for loads.
    */
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const FLAT_instruction& flat = instr->flatlike();
   bool glc = flat.cache.value & ac_glc;
   bool slc = flat.cache.value & ac_slc;
   bool dlc = flat.cache.value & ac_dlc;

   uint32_t encoding = (0b110111 << 26);
   encoding |= opcode << 18;
   if (ctx.gfx_level == GFX9 || ctx.gfx_level >= GFX11) {
      /* 13-bit OFFSET field: unsigned for true FLAT, signed for GLOBAL/SCRATCH. */
      if (instr->isFlat())
         assert(flat.offset <= 0xfff);
      else
         assert(flat.offset >= -4096 && flat.offset < 4096);
      encoding |= flat.offset & 0x1fff;
   } else if (ctx.gfx_level <= GFX8 || instr->isFlat()) {
      /* GFX10 has a 12-bit immediate OFFSET field,
       * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
       */
      assert(flat.offset == 0);
   } else {
      /* GFX10 GLOBAL/SCRATCH: 12-bit signed offset. */
      assert(flat.offset >= -2048 && flat.offset <= 2047);
      encoding |= flat.offset & 0xfff;
   }
   /* SEG field (0 = flat, 1 = scratch, 2 = global) moved from bit 14 to 16 on GFX11. */
   if (instr->isScratch())
      encoding |= 1 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   else if (instr->isGlobal())
      encoding |= 2 << (ctx.gfx_level >= GFX11 ? 16 : 14);
   encoding |= flat.lds ? 1 << 13 : 0;
   /* The GLC/SLC/DLC cache-policy bits swapped positions on GFX11. */
   encoding |= glc ? 1 << (ctx.gfx_level >= GFX11 ? 14 : 16) : 0;
   encoding |= slc ? 1 << (ctx.gfx_level >= GFX11 ? 15 : 17) : 0;
   if (ctx.gfx_level >= GFX10) {
      assert(!flat.nv);
      encoding |= dlc ? 1 << (ctx.gfx_level >= GFX11 ? 13 : 12) : 0;
   } else {
      assert(!dlc);
   }
   out.push_back(encoding);
   /* Second dword: ADDR (bits 0-7), DATA (8-15), SADDR (16-23), VDST (24-31). */
   encoding = reg(ctx, instr->operands[0], 8);
   if (!instr->definitions.empty())
      encoding |= reg(ctx, instr->definitions[0], 8) << 24;
   if (instr->operands.size() >= 3)
      encoding |= reg(ctx, instr->operands[2], 8) << 8;
   if (!instr->operands[1].isUndefined()) {
      assert(ctx.gfx_level >= GFX10 || instr->operands[1].physReg() != 0x7F);
      assert(instr->format != Format::FLAT);
      encoding |= reg(ctx, instr->operands[1], 8) << 16;
   } else if (instr->format != Format::FLAT ||
              ctx.gfx_level >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
      /* For GFX10.3 scratch, 0x7F disables both ADDR and SADDR, unlike sgpr_null, which only
       * disables SADDR. On GFX11, this was replaced with SVE.
       */
      if (ctx.gfx_level <= GFX9 ||
          (instr->isScratch() && instr->operands[0].isUndefined() && ctx.gfx_level < GFX11))
         encoding |= 0x7F << 16;
      else
         encoding |= reg(ctx, sgpr_null) << 16;
   }
   /* Bit 23: SVE (scratch VGPR-address enable) on GFX11 scratch, NV otherwise. */
   if (ctx.gfx_level >= GFX11 && instr->isScratch())
      encoding |= !instr->operands[0].isUndefined() ? 1 << 23 : 0;
   else
      encoding |= flat.nv ? 1 << 23 : 0;
   out.push_back(encoding);
}
927
928 void
emit_flatlike_instruction_gfx12(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)929 emit_flatlike_instruction_gfx12(asm_context& ctx, std::vector<uint32_t>& out,
930 const Instruction* instr)
931 {
932 uint32_t opcode = ctx.opcode[(int)instr->opcode];
933 const FLAT_instruction& flat = instr->flatlike();
934 assert(!flat.lds);
935
936 uint32_t encoding = opcode << 14;
937 encoding |= 0b111011 << 26;
938 if (!instr->operands[1].isUndefined()) {
939 assert(!instr->isFlat());
940 encoding |= reg(ctx, instr->operands[1]);
941 } else {
942 encoding |= reg(ctx, sgpr_null);
943 }
944 if (instr->isScratch())
945 encoding |= 1 << 24;
946 else if (instr->isGlobal())
947 encoding |= 2 << 24;
948 out.push_back(encoding);
949
950 encoding = 0;
951 if (!instr->definitions.empty())
952 encoding |= reg(ctx, instr->definitions[0], 8);
953 if (instr->isScratch())
954 encoding |= !instr->operands[0].isUndefined() ? 1 << 17 : 0;
955 encoding |= get_gfx12_cpol(flat) << 18;
956 if (instr->operands.size() >= 3)
957 encoding |= reg(ctx, instr->operands[2], 8) << 23;
958 out.push_back(encoding);
959
960 encoding = 0;
961 if (!instr->operands[0].isUndefined())
962 encoding |= reg(ctx, instr->operands[0], 8);
963 encoding |= (flat.offset & 0x00ffffff) << 8;
964 out.push_back(encoding);
965 }
966
967 void
emit_exp_instruction(asm_context & ctx,std::vector<uint32_t> & out,const Instruction * instr)968 emit_exp_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
969 {
970 const Export_instruction& exp = instr->exp();
971 uint32_t encoding;
972 if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9) {
973 encoding = (0b110001 << 26);
974 } else {
975 encoding = (0b111110 << 26);
976 }
977
978 if (ctx.gfx_level >= GFX11) {
979 encoding |= exp.row_en ? 0b1 << 13 : 0;
980 } else {
981 encoding |= exp.valid_mask ? 0b1 << 12 : 0;
982 encoding |= exp.compressed ? 0b1 << 10 : 0;
983 }
984 encoding |= exp.done ? 0b1 << 11 : 0;
985 encoding |= exp.dest << 4;
986 encoding |= exp.enabled_mask;
987 out.push_back(encoding);
988 encoding = reg(ctx, exp.operands[0], 8);
989 encoding |= reg(ctx, exp.operands[1], 8) << 8;
990 encoding |= reg(ctx, exp.operands[2], 8) << 16;
991 encoding |= reg(ctx, exp.operands[3], 8) << 24;
992 out.push_back(encoding);
993 }
994
995 void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr);
996
void
emit_dpp16_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* Encode a DPP16 instruction: the base VALU encoding is emitted first with
    * src0 replaced by the DPP marker register (249+1 = 250), then the extra DPP
    * control dword is appended.
    */
   assert(ctx.gfx_level >= GFX8);
   DPP16_instruction& dpp = instr->dpp16();

   /* first emit the instruction without the DPP operand */
   Operand dpp_op = instr->operands[0];
   instr->operands[0] = Operand(PhysReg{250}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP16);
   emit_instruction(ctx, out, instr);
   /* Restore the instruction so the caller sees it unmodified. */
   instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::DPP16);
   instr->operands[0] = dpp_op;

   /* DPP dword: row_mask (28-31), bank_mask (24-27), abs/neg modifiers for the
    * first two sources (20-23), fetch_inactive (18), bound_ctrl (19),
    * dpp_ctrl (8-16), and the real src0 in the low byte.
    */
   uint32_t encoding = (0xF & dpp.row_mask) << 28;
   encoding |= (0xF & dpp.bank_mask) << 24;
   encoding |= dpp.abs[1] << 23;
   encoding |= dpp.neg[1] << 22;
   encoding |= dpp.abs[0] << 21;
   encoding |= dpp.neg[0] << 20;
   encoding |= dpp.fetch_inactive << 18;
   encoding |= dpp.bound_ctrl << 19;
   encoding |= dpp.dpp_ctrl << 8;
   encoding |= reg(ctx, dpp_op, 8);
   /* opsel[0] is carried in bit 7 only for non-VOP3 encodings. */
   encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
   out.push_back(encoding);
}
1024
1025 void
emit_dpp8_instruction(asm_context & ctx,std::vector<uint32_t> & out,Instruction * instr)1026 emit_dpp8_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
1027 {
1028 assert(ctx.gfx_level >= GFX10);
1029 DPP8_instruction& dpp = instr->dpp8();
1030
1031 /* first emit the instruction without the DPP operand */
1032 Operand dpp_op = instr->operands[0];
1033 instr->operands[0] = Operand(PhysReg{233u + dpp.fetch_inactive}, v1);
1034 instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::DPP8);
1035 emit_instruction(ctx, out, instr);
1036 instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::DPP8);
1037 instr->operands[0] = dpp_op;
1038
1039 uint32_t encoding = reg(ctx, dpp_op, 8);
1040 encoding |= dpp.opsel[0] && !instr->isVOP3() ? 128 : 0;
1041 encoding |= dpp.lane_sel << 8;
1042 out.push_back(encoding);
1043 }
1044
void
emit_vop3_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   /* Encode a VOP3 instruction (or a VOP1/VOP2/VOPC/VINTRP promoted to the
    * VOP3 encoding) as two dwords.
    */
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& vop3 = instr->valu();

   /* The opcode tables store the native-encoding opcode; shift it into the
    * VOP3 opcode space. The offsets depend on the source format (and, for
    * VOP1, on the generation).
    */
   if (instr->isVOP2()) {
      opcode = opcode + 0x100;
   } else if (instr->isVOP1()) {
      if (ctx.gfx_level == GFX8 || ctx.gfx_level == GFX9)
         opcode = opcode + 0x140;
      else
         opcode = opcode + 0x180;
   } else if (instr->isVOPC()) {
      opcode = opcode + 0x0;
   } else if (instr->isVINTRP()) {
      opcode = opcode + 0x270;
   }

   uint32_t encoding;
   if (ctx.gfx_level <= GFX9) {
      encoding = (0b110100 << 26);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110101 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   /* GFX7 and older place the opcode one bit higher and clamp at bit 11. */
   if (ctx.gfx_level <= GFX7) {
      encoding |= opcode << 17;
      encoding |= (vop3.clamp ? 1 : 0) << 11;
   } else {
      encoding |= opcode << 16;
      encoding |= (vop3.clamp ? 1 : 0) << 15;
   }
   encoding |= vop3.opsel << 11;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.abs[i] << (8 + i);
   /* On GFX9 and older, v_cmpx implicitly writes exec besides writing an SGPR pair.
    * On GFX10 and newer, v_cmpx always writes just exec.
    */
   if (instr->definitions.size() == 2 && instr->isVOPC())
      assert(ctx.gfx_level <= GFX9 && instr->definitions[1].physReg() == exec);
   else if (instr->definitions.size() == 2 && instr->opcode != aco_opcode::v_swap_b16)
      encoding |= reg(ctx, instr->definitions[1]) << 8; /* SDST (e.g. carry-out) */
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;

   unsigned num_ops = instr->operands.size();
   /* Encoding implicit sources works fine with hardware but breaks some disassemblers. */
   if (instr->opcode == aco_opcode::v_writelane_b32_e64)
      num_ops = 2;
   else if (instr->opcode == aco_opcode::v_swap_b16)
      num_ops = 1;

   /* Second dword: 9-bit source fields, omod (27-28) and neg bits (29-31). */
   for (unsigned i = 0; i < num_ops; i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= vop3.omod << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg[i] << (29 + i);
   out.push_back(encoding);
}
1108
void
emit_vop3p_instruction(asm_context& ctx, std::vector<uint32_t>& out, const Instruction* instr)
{
   /* Encode a packed-math VOP3P instruction as two dwords. */
   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   const VALU_instruction& vop3 = instr->valu();

   /* VOP3P first appeared on GFX9; the encoding identifier changed on GFX10. */
   uint32_t encoding;
   if (ctx.gfx_level == GFX9) {
      encoding = (0b110100111 << 23);
   } else if (ctx.gfx_level >= GFX10) {
      encoding = (0b110011 << 26);
   } else {
      unreachable("Unknown gfx_level.");
   }

   encoding |= opcode << 16;
   encoding |= (vop3.clamp ? 1 : 0) << 15;
   encoding |= vop3.opsel_lo << 11;
   /* opsel_hi is split: the third source's bit lives in dword 0 (bit 14),
    * the first two in dword 1 (bits 27-28).
    */
   encoding |= ((vop3.opsel_hi & 0x4) ? 1 : 0) << 14;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_hi[i] << (8 + i);
   encoding |= reg(ctx, instr->definitions[0], 8);
   out.push_back(encoding);
   encoding = 0;
   /* Second dword: 9-bit source fields, remaining opsel_hi and neg_lo bits. */
   for (unsigned i = 0; i < instr->operands.size(); i++)
      encoding |= reg(ctx, instr->operands[i]) << (i * 9);
   encoding |= (vop3.opsel_hi & 0x3) << 27;
   for (unsigned i = 0; i < 3; i++)
      encoding |= vop3.neg_lo[i] << (29 + i);
   out.push_back(encoding);
}
1140
void
emit_sdwa_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* Encode an SDWA instruction: emit the base VALU encoding with src0
    * replaced by the SDWA marker register (249), then append the SDWA dword.
    * SDWA exists on GFX8-GFX10.x only.
    */
   assert(ctx.gfx_level >= GFX8 && ctx.gfx_level < GFX11);
   SDWA_instruction& sdwa = instr->sdwa();

   /* first emit the instruction without the SDWA operand */
   Operand sdwa_op = instr->operands[0];
   instr->operands[0] = Operand(PhysReg{249}, v1);
   instr->format = (Format)((uint16_t)instr->format & ~(uint16_t)Format::SDWA);
   emit_instruction(ctx, out, instr);
   /* Restore the instruction so the caller sees it unmodified. */
   instr->format = (Format)((uint16_t)instr->format | (uint16_t)Format::SDWA);
   instr->operands[0] = sdwa_op;

   uint32_t encoding = 0;

   if (instr->isVOPC()) {
      /* VOPC: a non-default destination (not vcc, or not exec for GFX10 cmpx)
       * is encoded explicitly with the SD bit (15) set.
       */
      if (instr->definitions[0].physReg() !=
          (ctx.gfx_level >= GFX10 && is_cmpx(instr->opcode) ? exec : vcc)) {
         encoding |= reg(ctx, instr->definitions[0]) << 8;
         encoding |= 1 << 15;
      }
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
   } else {
      /* Non-VOPC: destination byte select, unused-bits policy, clamp, omod. */
      encoding |= sdwa.dst_sel.to_sdwa_sel(instr->definitions[0].physReg().byte()) << 8;
      uint32_t dst_u = sdwa.dst_sel.sign_extend() ? 1 : 0;
      if (instr->definitions[0].bytes() < 4) /* dst_preserve */
         dst_u = 2;
      encoding |= dst_u << 11;
      encoding |= (sdwa.clamp ? 1 : 0) << 13;
      encoding |= sdwa.omod << 14;
   }

   /* src0 byte select, sign extension and modifiers. */
   encoding |= sdwa.sel[0].to_sdwa_sel(sdwa_op.physReg().byte()) << 16;
   encoding |= sdwa.sel[0].sign_extend() ? 1 << 19 : 0;
   encoding |= sdwa.abs[0] << 21;
   encoding |= sdwa.neg[0] << 20;

   /* src1 byte select, sign extension and modifiers (if present). */
   if (instr->operands.size() >= 2) {
      encoding |= sdwa.sel[1].to_sdwa_sel(instr->operands[1].physReg().byte()) << 24;
      encoding |= sdwa.sel[1].sign_extend() ? 1 << 27 : 0;
      encoding |= sdwa.abs[1] << 29;
      encoding |= sdwa.neg[1] << 28;
   }

   /* Low byte: src0 register; bits 23/31 flag SGPR (reg < 256) sources. */
   encoding |= reg(ctx, sdwa_op, 8);
   encoding |= (sdwa_op.physReg() < 256) << 23;
   if (instr->operands.size() >= 2)
      encoding |= (instr->operands[1].physReg() < 256) << 31;
   out.push_back(encoding);
}
1192
void
emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* Assemble one instruction into `out`, dispatching on its format.
    * Pseudo-instructions that survive until assembly are lowered in place
    * first (mutating `instr`); their literal positions are recorded in the
    * asm_context so fix_constaddrs() can patch them once final offsets are
    * known.
    */

   /* lower remaining pseudo-instructions */
   if (instr->opcode == aco_opcode::p_constaddr_getpc) {
      /* Record the dword index just past the one-dword s_getpc_b64 encoding. */
      ctx.constaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_constaddr_addlo) {
      /* Record the index of the literal dword (it follows the opcode dword). */
      ctx.constaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_resumeaddr_getpc) {
      ctx.resumeaddrs[instr->operands[0].constantValue()].getpc_end = out.size() + 1;

      instr->opcode = aco_opcode::s_getpc_b64;
      instr->operands.pop_back();
   } else if (instr->opcode == aco_opcode::p_resumeaddr_addlo) {
      ctx.resumeaddrs[instr->operands[2].constantValue()].add_literal = out.size() + 1;

      instr->opcode = aco_opcode::s_add_u32;
      instr->operands.pop_back();
      assert(instr->operands[1].isConstant());
      /* in case it's an inline constant, make it a literal */
      instr->operands[1] = Operand::literal32(instr->operands[1].constantValue());
   } else if (instr->opcode == aco_opcode::p_load_symbol) {
      /* Becomes s_mov_b32 with a zero literal; the driver patches the literal
       * using the recorded symbol offset.
       */
      assert(instr->operands[0].isConstant());
      assert(ctx.symbols);

      struct aco_symbol info;
      info.id = (enum aco_symbol_id)instr->operands[0].constantValue();
      info.offset = out.size() + 1;
      ctx.symbols->push_back(info);

      instr->opcode = aco_opcode::s_mov_b32;
      /* in case it's an inline constant, make it a literal */
      instr->operands[0] = Operand::literal32(0);
   } else if (instr->opcode == aco_opcode::p_debug_info) {
      /* Emits no code; only records the current byte offset. */
      assert(instr->operands[0].isConstant());
      uint32_t index = instr->operands[0].constantValue();
      ctx.program->debug_info[index].offset = (out.size() - 1) * 4;
      return;
   }

   /* Promote VOP12C to VOP3 if necessary. */
   if ((instr->isVOP1() || instr->isVOP2() || instr->isVOPC()) && !instr->isVOP3() &&
       needs_vop3_gfx11(ctx, instr)) {
      instr->format = asVOP3(instr->format);
      /* v_fmaak/v_fmamk have no VOP3 form; rewrite them as v_fma_f16. */
      if (instr->opcode == aco_opcode::v_fmaak_f16) {
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      } else if (instr->opcode == aco_opcode::v_fmamk_f16) {
         instr->valu().swapOperands(1, 2);
         instr->opcode = aco_opcode::v_fma_f16;
         instr->format = (Format)((uint32_t)instr->format & ~(uint32_t)Format::VOP2);
      }
   }

   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   if (opcode == (uint32_t)-1) {
      /* Opcode not available on this generation: print it and abort. */
      char* outmem;
      size_t outsize;
      struct u_memstream mem;
      u_memstream_open(&mem, &outmem, &outsize);
      FILE* const memf = u_memstream_get(&mem);

      fprintf(memf, "Unsupported opcode: ");
      aco_print_instr(ctx.gfx_level, instr, memf);
      u_memstream_close(&mem);

      aco_err(ctx.program, outmem);
      free(outmem);

      abort();
   }

   switch (instr->format) {
   case Format::SOP2: {
      emit_sop2_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPK: {
      emit_sopk_instruction(ctx, out, instr);
      break;
   }
   case Format::SOP1: {
      emit_sop1_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPC: {
      emit_sopc_instruction(ctx, out, instr);
      break;
   }
   case Format::SOPP: {
      emit_sopp_instruction(ctx, out, instr);
      break;
   }
   case Format::SMEM: {
      /* Note: returns instead of breaking, skipping the literal append below. */
      emit_smem_instruction(ctx, out, instr);
      return;
   }
   case Format::VOP2: {
      emit_vop2_instruction(ctx, out, instr);
      break;
   }
   case Format::VOP1: {
      emit_vop1_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPC: {
      emit_vopc_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTRP: {
      emit_vintrp_instruction(ctx, out, instr);
      break;
   }
   case Format::VINTERP_INREG: {
      emit_vinterp_inreg_instruction(ctx, out, instr);
      break;
   }
   case Format::VOPD: {
      emit_vopd_instruction(ctx, out, instr);
      break;
   }
   case Format::DS: {
      emit_ds_instruction(ctx, out, instr);
      break;
   }
   case Format::LDSDIR: {
      emit_ldsdir_instruction(ctx, out, instr);
      break;
   }
   case Format::MUBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mubuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mubuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MTBUF: {
      if (ctx.gfx_level >= GFX12)
         emit_mtbuf_instruction_gfx12(ctx, out, instr);
      else
         emit_mtbuf_instruction(ctx, out, instr);
      break;
   }
   case Format::MIMG: {
      if (ctx.gfx_level >= GFX12)
         emit_mimg_instruction_gfx12(ctx, out, instr);
      else
         emit_mimg_instruction(ctx, out, instr);
      break;
   }
   case Format::FLAT:
   case Format::SCRATCH:
   case Format::GLOBAL: {
      if (ctx.gfx_level >= GFX12)
         emit_flatlike_instruction_gfx12(ctx, out, instr);
      else
         emit_flatlike_instruction(ctx, out, instr);
      break;
   }
   case Format::EXP: {
      emit_exp_instruction(ctx, out, instr);
      break;
   }
   case Format::PSEUDO:
   case Format::PSEUDO_BARRIER:
      if (instr->opcode != aco_opcode::p_unit_test)
         unreachable("Pseudo instructions should be lowered before assembly.");
      break;
   default:
      /* Mixed formats (DPP/SDWA/VOP3/VOP3P combined with a base format) land
       * here. DPP and SDWA emitters append their extra dword themselves and
       * return early (the literal, if any, was already emitted recursively).
       */
      if (instr->isDPP16()) {
         emit_dpp16_instruction(ctx, out, instr);
         return;
      } else if (instr->isDPP8()) {
         emit_dpp8_instruction(ctx, out, instr);
         return;
      } else if (instr->isVOP3()) {
         emit_vop3_instruction(ctx, out, instr);
      } else if (instr->isVOP3P()) {
         emit_vop3p_instruction(ctx, out, instr);
      } else if (instr->isSDWA()) {
         emit_sdwa_instruction(ctx, out, instr);
      } else {
         unreachable("unimplemented instruction format");
      }
      break;
   }

   /* append literal dword */
   for (const Operand& op : instr->operands) {
      if (op.isLiteral()) {
         out.push_back(op.constantValue());
         break;
      }
   }
}
1397
void
emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
{
   /* Assemble every instruction of the block in order. The #if 0 sections are
    * disabled debugging aids that dump each instruction and its encoding.
    */
   for (aco_ptr<Instruction>& instr : block.instructions) {
#if 0
      int start_idx = out.size();
      std::cerr << "Encoding:\t" << std::endl;
      aco_print_instr(&*instr, stderr);
      std::cerr << std::endl;
#endif
      emit_instruction(ctx, out, instr.get());
#if 0
      for (int i = start_idx; i < out.size(); i++)
         std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
#endif
   }
}
1415
void
fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   /* Ensure the last relevant export of the shader has the `done` flag set
    * (and `valid_mask` for fragment shaders): scan each export-end block
    * backwards for the final export. Aborts if no export is found and the
    * hardware stage requires one, to avoid a GPU hang.
    */
   bool exported = false;
   for (Block& block : program->blocks) {
      if (!(block.kind & block_kind_export_end))
         continue;
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      while (it != block.instructions.rend()) {
         if ((*it)->isEXP()) {
            Export_instruction& exp = (*it)->exp();
            if (program->stage.hw == AC_HW_VERTEX_SHADER ||
                program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
               /* VS/NGG: only position exports can carry the done flag. */
               if (exp.dest >= V_008DFC_SQ_EXP_POS && exp.dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp.done = true;
                  exported = true;
                  break;
               }
            } else {
               exp.done = true;
               exp.valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec) {
            /* Stop at an exec write: exports before it ran under a different
             * exec mask.
             */
            break;
         }
         ++it;
      }
   }

   /* GFX10+ FS may not export anything if no discard is used. */
   bool may_skip_export = program->stage.hw == AC_HW_PIXEL_SHADER && program->gfx_level >= GFX10;

   if (!exported && !may_skip_export) {
      /* Abort in order to avoid a GPU hang. */
      bool is_vertex_or_ngg = (program->stage.hw == AC_HW_VERTEX_SHADER ||
                               program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER);
      aco_err(program,
              "Missing export in %s shader:", is_vertex_or_ngg ? "vertex or NGG" : "fragment");
      aco_print_program(program, stderr);
      abort();
   }
}
1460
1461 static void
insert_code(asm_context & ctx,std::vector<uint32_t> & out,unsigned insert_before,unsigned insert_count,const uint32_t * insert_data)1462 insert_code(asm_context& ctx, std::vector<uint32_t>& out, unsigned insert_before,
1463 unsigned insert_count, const uint32_t* insert_data)
1464 {
1465 out.insert(out.begin() + insert_before, insert_data, insert_data + insert_count);
1466
1467 /* Update the offset of each affected block */
1468 for (Block& block : ctx.program->blocks) {
1469 if (block.offset >= insert_before)
1470 block.offset += insert_count;
1471 }
1472
1473 /* Update the locations of branches */
1474 for (branch_info& info : ctx.branches) {
1475 if (info.pos >= insert_before)
1476 info.pos += insert_count;
1477 }
1478
1479 /* Update the locations of p_constaddr instructions */
1480 for (auto& constaddr : ctx.constaddrs) {
1481 constaddr_info& info = constaddr.second;
1482 if (info.getpc_end >= insert_before)
1483 info.getpc_end += insert_count;
1484 if (info.add_literal >= insert_before)
1485 info.add_literal += insert_count;
1486 }
1487 for (auto& constaddr : ctx.resumeaddrs) {
1488 constaddr_info& info = constaddr.second;
1489 if (info.getpc_end >= insert_before)
1490 info.getpc_end += insert_count;
1491 if (info.add_literal >= insert_before)
1492 info.add_literal += insert_count;
1493 }
1494
1495 if (ctx.symbols) {
1496 for (auto& symbol : *ctx.symbols) {
1497 if (symbol.offset >= insert_before)
1498 symbol.offset += insert_count;
1499 }
1500 }
1501 }
1502
1503 static void
fix_branches_gfx10(asm_context & ctx,std::vector<uint32_t> & out)1504 fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
1505 {
1506 /* Branches with an offset of 0x3f are buggy on GFX10,
1507 * we workaround by inserting NOPs if needed.
1508 */
1509 bool gfx10_3f_bug = false;
1510
1511 do {
1512 auto buggy_branch_it = std::find_if(
1513 ctx.branches.begin(), ctx.branches.end(), [&](const branch_info& branch) -> bool
1514 { return ((int)ctx.program->blocks[branch.target].offset - branch.pos - 1) == 0x3f; });
1515 gfx10_3f_bug = buggy_branch_it != ctx.branches.end();
1516
1517 if (gfx10_3f_bug) {
1518 /* Insert an s_nop after the branch */
1519 constexpr uint32_t s_nop_0 = 0xbf800000u;
1520 insert_code(ctx, out, buggy_branch_it->pos + 1, 1, &s_nop_0);
1521 }
1522 } while (gfx10_3f_bug);
1523 }
1524
void
chain_branches(asm_context& ctx, std::vector<uint32_t>& out, branch_info& branch)
{
   /* Handle a branch whose target is out of range for the 16-bit SOPP
    * immediate: insert an intermediate unconditional branch roughly halfway,
    * re-direct the original branch to it, and register the new branch (which
    * may itself be chained again by fix_branches()).
    */

   /* Create an empty block in order to remember the offset of the chained branch instruction.
    * The new branch instructions are inserted into the program in source code order.
    */
   Block* new_block = ctx.program->create_and_insert_block();
   Builder bld(ctx.program);
   std::vector<uint32_t> code;
   Instruction* branch_instr;

   /* Re-direct original branch to new block (offset). */
   unsigned target = branch.target;
   branch.target = new_block->index;

   /* Find suitable insertion point:
    * We define two offset ranges within our new branch instruction should be placed.
    * Then we try to maximize the distance from either the previous branch or the target.
    */
   const int half_dist = (INT16_MAX - 31) / 2;
   const unsigned upper_start = MIN2(ctx.program->blocks[target].offset, branch.pos) + half_dist;
   const unsigned upper_end = upper_start + half_dist;
   const unsigned lower_end = MAX2(ctx.program->blocks[target].offset, branch.pos) - half_dist;
   const unsigned lower_start = lower_end - half_dist;
   unsigned insert_at = 0;
   for (unsigned i = 0; i < ctx.program->blocks.size() - 1; i++) {
      Block& block = ctx.program->blocks[i];
      Block& next = ctx.program->blocks[i + 1];
      if (next.offset >= lower_end)
         break;
      if (next.offset < upper_start || (next.offset > upper_end && next.offset < lower_start))
         continue;

      /* If this block ends in an unconditional branch, we can insert
       * another branch right after it without additional cost for the
       * existing code.
       */
      if (!block.instructions.empty() &&
          block.instructions.back()->opcode == aco_opcode::s_branch) {
         insert_at = next.offset;
         bld.reset(&block.instructions);
         if (next.offset >= lower_start)
            break;
      }
   }

   /* If we didn't find a suitable insertion point, split the existing code. */
   if (insert_at == 0) {
      /* Find the last block that is still within reach. */
      unsigned insertion_block_idx = 0;
      while (ctx.program->blocks[insertion_block_idx + 1].offset < upper_end)
         insertion_block_idx++;

      insert_at = ctx.program->blocks[insertion_block_idx].offset;
      auto it = ctx.program->blocks[insertion_block_idx].instructions.begin();
      int skip = 0;
      if (insert_at < upper_start) {
         /* Ensure some forward progress by splitting the block if necessary.
          * Re-assemble instructions one by one (tracking their encoded size)
          * until the insertion point reaches the desired range, taking care
          * not to split s_clause/s_delay_alu/s_branch groups.
          */
         while (skip-- > 0 || insert_at < upper_start) {
            Instruction* instr = (it++)->get();
            if (instr->isSOPP()) {
               /* SOPP instructions are always a single dword. */
               if (instr->opcode == aco_opcode::s_clause)
                  skip = instr->salu().imm + 1;
               else if (instr->opcode == aco_opcode::s_delay_alu)
                  skip = ((instr->salu().imm >> 4) & 0x7) + 1;
               else if (instr->opcode == aco_opcode::s_branch)
                  skip = 1;
               insert_at++;
               continue;
            }
            emit_instruction(ctx, code, instr);
            assert(out[insert_at] == code[0]);
            insert_at += code.size();
            code.clear();
         }

         /* If the insertion point is in the middle of the block, insert the branch instructions
          * into that block instead. */
         bld.reset(&ctx.program->blocks[insertion_block_idx].instructions, it);
      } else {
         bld.reset(&ctx.program->blocks[insertion_block_idx - 1].instructions);
      }

      /* Since we insert a branch into existing code, mitigate LdsBranchVmemWARHazard on GFX10. */
      if (ctx.program->gfx_level == GFX10) {
         emit_sopk_instruction(
            ctx, code, bld.sopk(aco_opcode::s_waitcnt_vscnt, Operand(sgpr_null, s1), 0).instr);
      }

      /* For the existing code, create a short jump over the new branch. */
      branch_instr = bld.sopp(aco_opcode::s_branch, 1).instr;
      emit_sopp_instruction(ctx, code, branch_instr, true);
   }
   const unsigned block_offset = insert_at + code.size();

   /* Emit the chained branch itself; its offset gets patched by fix_branches(). */
   branch_instr = bld.sopp(aco_opcode::s_branch, 0);
   emit_sopp_instruction(ctx, code, branch_instr, true);
   insert_code(ctx, out, insert_at, code.size(), code.data());

   new_block->offset = block_offset;
   ctx.branches.push_back({block_offset, target});
   assert(out[ctx.branches.back().pos] == code.back());
}
1628
1629 void
fix_branches(asm_context & ctx,std::vector<uint32_t> & out)1630 fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
1631 {
1632 bool repeat = false;
1633 do {
1634 repeat = false;
1635
1636 if (ctx.gfx_level == GFX10)
1637 fix_branches_gfx10(ctx, out);
1638
1639 for (branch_info& branch : ctx.branches) {
1640 int offset = (int)ctx.program->blocks[branch.target].offset - branch.pos - 1;
1641 if (offset >= INT16_MIN && offset <= INT16_MAX) {
1642 out[branch.pos] &= 0xffff0000u;
1643 out[branch.pos] |= (uint16_t)offset;
1644 } else {
1645 chain_branches(ctx, out, branch);
1646 repeat = true;
1647 break;
1648 }
1649 }
1650 } while (repeat);
1651 }
1652
1653 void
fix_constaddrs(asm_context & ctx,std::vector<uint32_t> & out)1654 fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
1655 {
1656 for (auto& constaddr : ctx.constaddrs) {
1657 constaddr_info& info = constaddr.second;
1658 out[info.add_literal] += (out.size() - info.getpc_end) * 4u;
1659
1660 if (ctx.symbols) {
1661 struct aco_symbol sym;
1662 sym.id = aco_symbol_const_data_addr;
1663 sym.offset = info.add_literal;
1664 ctx.symbols->push_back(sym);
1665 }
1666 }
1667 for (auto& addr : ctx.resumeaddrs) {
1668 constaddr_info& info = addr.second;
1669 const Block& block = ctx.program->blocks[out[info.add_literal]];
1670 assert(block.kind & block_kind_resume);
1671 out[info.add_literal] = (block.offset - info.getpc_end) * 4u;
1672 }
1673 }
1674
void
align_block(asm_context& ctx, std::vector<uint32_t>& code, Block& block)
{
   /* Called before each block is emitted (with block.offset already set to the
    * current code size): aligns just-finished inner loops to cache lines,
    * optionally tweaks the instruction-prefetch mode around small loops, and
    * cache-line-aligns resume shader entry points.
    */

   /* Blocks with block_kind_loop_exit might be eliminated after jump threading, so we instead find
    * loop exits using loop_nest_depth.
    */
   if (ctx.loop_header != -1u && !block.linear_preds.empty() &&
       block.loop_nest_depth < ctx.program->blocks[ctx.loop_header].loop_nest_depth) {
      Block& loop_header = ctx.program->blocks[ctx.loop_header];
      ctx.loop_header = -1u;
      std::vector<uint32_t> nops;

      /* Number of 64-byte (16-dword) cache lines the loop body spans. */
      const unsigned loop_num_cl = DIV_ROUND_UP(block.offset - loop_header.offset, 16);

      /* On GFX10.3+, change the prefetch mode if the loop fits into 2 or 3 cache lines.
       * Don't use the s_inst_prefetch instruction on GFX10 as it might cause hangs.
       */
      const bool change_prefetch = ctx.program->gfx_level >= GFX10_3 &&
                                   ctx.program->gfx_level <= GFX11 && loop_num_cl > 1 &&
                                   loop_num_cl <= 3;

      if (change_prefetch) {
         /* Insert s_inst_prefetch in the loop preheader, right before the loop. */
         Builder bld(ctx.program, &ctx.program->blocks[loop_header.linear_preds[0]]);
         int16_t prefetch_mode = loop_num_cl == 3 ? 0x1 : 0x2;
         Instruction* instr = bld.sopp(aco_opcode::s_inst_prefetch, prefetch_mode);
         emit_instruction(ctx, nops, instr);
         insert_code(ctx, code, loop_header.offset, nops.size(), nops.data());

         /* Change prefetch mode back to default (0x3). */
         bld.reset(&block.instructions, block.instructions.begin());
         bld.sopp(aco_opcode::s_inst_prefetch, 0x3);
      }

      const unsigned loop_start_cl = loop_header.offset >> 4;
      const unsigned loop_end_cl = (block.offset - 1) >> 4;

      /* Align the loop if it fits into the fetched cache lines or if we can
       * reduce the number of cache lines with less than 8 NOPs.
       */
      const bool align_loop = loop_end_cl - loop_start_cl >= loop_num_cl &&
                              (loop_num_cl == 1 || change_prefetch || loop_header.offset % 16 > 8);

      if (align_loop) {
         /* Pad with s_nop so the loop header starts on a cache-line boundary. */
         nops.clear();
         nops.resize(16 - (loop_header.offset % 16), 0xbf800000u);
         insert_code(ctx, code, loop_header.offset, nops.size(), nops.data());
      }
   }

   if (block.kind & block_kind_loop_header) {
      /* In case of nested loops, only handle the inner-most loops in order
       * to not break the alignment of inner loops by handling outer loops.
       * Also ignore loops without back-edge.
       */
      if (block.linear_preds.size() > 1)
         ctx.loop_header = block.index;
   }

   /* align resume shaders with cache line */
   if (block.kind & block_kind_resume) {
      size_t cache_aligned = align(code.size(), 16);
      code.resize(cache_aligned, 0xbf800000u); /* s_nop 0 */
      block.offset = code.size();
   }
}
1740
1741 unsigned
emit_program(Program * program,std::vector<uint32_t> & code,std::vector<struct aco_symbol> * symbols,bool append_endpgm)1742 emit_program(Program* program, std::vector<uint32_t>& code, std::vector<struct aco_symbol>* symbols,
1743 bool append_endpgm)
1744 {
1745 asm_context ctx(program, symbols);
1746
1747 bool is_separately_compiled_ngg_vs_or_es =
1748 (program->stage.sw == SWStage::VS || program->stage.sw == SWStage::TES) &&
1749 program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER &&
1750 program->info.merged_shader_compiled_separately;
1751
1752 /* Prolog has no exports. */
1753 if (!program->is_prolog && !program->info.ps.has_epilog &&
1754 !is_separately_compiled_ngg_vs_or_es &&
1755 (program->stage.hw == AC_HW_VERTEX_SHADER || program->stage.hw == AC_HW_PIXEL_SHADER ||
1756 program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER))
1757 fix_exports(ctx, code, program);
1758
1759 for (Block& block : program->blocks) {
1760 block.offset = code.size();
1761 align_block(ctx, code, block);
1762 emit_block(ctx, code, block);
1763 }
1764
1765 fix_branches(ctx, code);
1766
1767 unsigned exec_size = code.size() * sizeof(uint32_t);
1768
1769 /* Add end-of-code markers for the UMR disassembler. */
1770 if (append_endpgm)
1771 code.resize(code.size() + 5, 0xbf9f0000u);
1772
1773 fix_constaddrs(ctx, code);
1774
1775 while (program->constant_data.size() % 4u)
1776 program->constant_data.push_back(0);
1777 /* Copy constant data */
1778 code.insert(code.end(), (uint32_t*)program->constant_data.data(),
1779 (uint32_t*)(program->constant_data.data() + program->constant_data.size()));
1780
1781 program->config->scratch_bytes_per_wave =
1782 align(program->config->scratch_bytes_per_wave, program->dev.scratch_alloc_granule);
1783
1784 return exec_size;
1785 }
1786
1787 } // namespace aco
1788