/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <array>
#include <bitset>
#include <vector>

namespace aco {
namespace {

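/* Register tracking in this pass is indexed per dword by physical register number:
 * indices below min_vgpr cover the scalar registers (including SCC, VCC and EXEC),
 * and indices from min_vgpr up to max_reg_cnt cover the VGPRs.
 */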
constexpr const size_t max_reg_cnt = 512;
constexpr const size_t max_sgpr_cnt = 128;
constexpr const size_t min_vgpr = 256;
constexpr const size_t max_vgpr_cnt = 256;

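/* Position of an instruction: the index of its block and its index within that block. */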
struct Idx {
   bool operator==(const Idx& other) const { return block == other.block && instr == other.instr; }
   bool operator!=(const Idx& other) const { return !operator==(other); }

   bool found() const { return block != UINT32_MAX; }

   uint32_t block;
   uint32_t instr;
};

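/* Sentinel values stored in the register tracking table when no single writer can be
 * determined for a register. Their block index is UINT32_MAX, so found() is false for all
 * of them.
 */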
Idx not_written_in_block{UINT32_MAX, 0};
Idx clobbered{UINT32_MAX, 1};
Idx const_or_undef{UINT32_MAX, 2};
Idx written_by_multiple_instrs{UINT32_MAX, 3};

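/* Context of the post-RA optimizer: the use count of each temporary and, for every block,
 * the position of the instruction that last wrote each physical register.
 */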
struct pr_opt_ctx {
   Program* program;
   Block* current_block;
   uint32_t current_instr_idx;
   std::vector<uint16_t> uses;
   std::vector<std::array<Idx, max_reg_cnt>> instr_idx_by_regs;

   void reset_block(Block* block)
   {
      current_block = block;
      current_instr_idx = 0;

      if ((block->kind & block_kind_loop_header) || block->linear_preds.empty()) {
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   not_written_in_block);
      } else {
         const uint32_t first_linear_pred = block->linear_preds[0];
         const std::vector<uint32_t>& linear_preds = block->linear_preds;

         for (unsigned i = 0; i < max_sgpr_cnt; i++) {
            const bool all_same = std::all_of(
               std::next(linear_preds.begin()), linear_preds.end(),
               [=](unsigned pred)
               { return instr_idx_by_regs[pred][i] == instr_idx_by_regs[first_linear_pred][i]; });

            if (all_same)
               instr_idx_by_regs[block->index][i] = instr_idx_by_regs[first_linear_pred][i];
            else
               instr_idx_by_regs[block->index][i] = written_by_multiple_instrs;
         }

         if (!block->logical_preds.empty()) {
            /* We assume that VGPRs are only read by blocks which have a logical predecessor,
             * i.e. any block that reads any VGPR has at least 1 logical predecessor.
             */
            const unsigned first_logical_pred = block->logical_preds[0];
            const std::vector<uint32_t>& logical_preds = block->logical_preds;

            for (unsigned i = min_vgpr; i < (min_vgpr + max_vgpr_cnt); i++) {
               const bool all_same = std::all_of(
                  std::next(logical_preds.begin()), logical_preds.end(),
                  [=](unsigned pred) {
                     return instr_idx_by_regs[pred][i] == instr_idx_by_regs[first_logical_pred][i];
                  });

               if (all_same)
                  instr_idx_by_regs[block->index][i] = instr_idx_by_regs[first_logical_pred][i];
               else
                  instr_idx_by_regs[block->index][i] = written_by_multiple_instrs;
            }
         } else {
            /* If a block has no logical predecessors, it is not part of the
             * logical CFG and therefore it also won't have any logical successors.
             * Such a block does not write any VGPRs ever.
             */
            assert(block->logical_succs.empty());
         }
      }
   }

   Instruction* get(Idx idx) { return program->blocks[idx.block].instructions[idx.instr].get(); }
};

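/* Remember the position of the current instruction as the last writer of every register
 * (dword) it defines. Subdword definitions can't be tracked precisely, so those registers
 * are marked as clobbered instead.
 */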
void
save_reg_writes(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   for (const Definition& def : instr->definitions) {
      assert(def.regClass().type() != RegType::sgpr || def.physReg().reg() <= 255);
      assert(def.regClass().type() != RegType::vgpr || def.physReg().reg() >= 256);

      unsigned dw_size = DIV_ROUND_UP(def.bytes(), 4u);
      unsigned r = def.physReg().reg();
      Idx idx{ctx.current_block->index, ctx.current_instr_idx};

      if (def.regClass().is_subdword())
         idx = clobbered;

      assert((r + dw_size) <= max_reg_cnt);
      assert(def.size() == dw_size || def.regClass().is_subdword());
      std::fill(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size, idx);
   }
}

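/* Returns the position of the instruction that last wrote the given register range in the
 * current block, or one of the sentinel values when the writer isn't known or the registers
 * were written by different instructions.
 */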
Idx
last_writer_idx(pr_opt_ctx& ctx, PhysReg physReg, RegClass rc)
{
   /* Verify that all of the operand's registers are written by the same instruction. */
   assert(physReg.reg() < max_reg_cnt);
   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][physReg.reg()];
   unsigned dw_size = DIV_ROUND_UP(rc.bytes(), 4u);
   unsigned r = physReg.reg();
   bool all_same =
      std::all_of(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                  ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size,
                  [instr_idx](Idx i) { return i == instr_idx; });

   return all_same ? instr_idx : written_by_multiple_instrs;
}

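/* Convenience overload for operands: constants and undefined operands have no writer. */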
Idx
last_writer_idx(pr_opt_ctx& ctx, const Operand& op)
{
   if (op.isConstant() || op.isUndefined())
      return const_or_undef;

   return last_writer_idx(ctx, op.physReg(), op.regClass());
}

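/* Returns whether any register in the given range may have been overwritten after the
 * instruction at idx.
 */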
bool
is_clobbered_since(pr_opt_ctx& ctx, PhysReg reg, RegClass rc, const Idx& idx)
{
   /* If we didn't find an instruction, assume that the register is clobbered. */
   if (!idx.found())
      return true;

   /* TODO: We currently can't keep track of subdword registers. */
   if (rc.is_subdword())
      return true;

   unsigned begin_reg = reg.reg();
   unsigned end_reg = begin_reg + rc.size();
   unsigned current_block_idx = ctx.current_block->index;

   for (unsigned r = begin_reg; r < end_reg; ++r) {
      Idx& i = ctx.instr_idx_by_regs[current_block_idx][r];
      if (i == clobbered || i == written_by_multiple_instrs)
         return true;
      else if (i == not_written_in_block)
         continue;

      assert(i.found());

      if (i.block > idx.block || (i.block == idx.block && i.instr > idx.instr))
         return true;
   }

   return false;
}

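/* Convenience wrapper for anything that exposes physReg() and regClass(), e.g. Operands
 * and Definitions.
 */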
template <typename T>
bool
is_clobbered_since(pr_opt_ctx& ctx, const T& t, const Idx& idx)
{
   return is_clobbered_since(ctx, t.physReg(), t.regClass(), idx);
}

void
try_apply_branch_vcc(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * vcc = ...                      ; last_vcc_wr
    * sX, scc = s_and_bXX vcc, exec  ; op0_instr
    * (...vcc and exec must not be clobbered in between...)
    * s_cbranch_XX scc               ; instr
    *
    * If possible, the above is optimized into:
    *
    * vcc = ...                      ; last_vcc_wr
    * s_cbranch_XX vcc               ; instr modified to use vcc
    */

   /* Don't try to optimize this on GFX6-7 because SMEM may corrupt the vccz bit. */
   if (ctx.program->gfx_level < GFX8)
      return;

   if (instr->format != Format::PSEUDO_BRANCH || instr->operands.size() == 0 ||
       instr->operands[0].physReg() != scc)
      return;

   Idx op0_instr_idx = last_writer_idx(ctx, instr->operands[0]);
   Idx last_vcc_wr_idx = last_writer_idx(ctx, vcc, ctx.program->lane_mask);

   /* We need to make sure:
    * - the instructions that wrote the operand register and VCC are both found
    * - the operand register used by the branch and VCC were both written in the current block
    * - EXEC hasn't been clobbered since the last VCC write
    * - VCC hasn't been clobbered since the operand register was written
    *   (i.e. the last VCC writer precedes the op0 writer)
    */
   if (!op0_instr_idx.found() || !last_vcc_wr_idx.found() ||
       op0_instr_idx.block != ctx.current_block->index ||
       last_vcc_wr_idx.block != ctx.current_block->index ||
       is_clobbered_since(ctx, exec, ctx.program->lane_mask, last_vcc_wr_idx) ||
       is_clobbered_since(ctx, vcc, ctx.program->lane_mask, op0_instr_idx))
      return;

   Instruction* op0_instr = ctx.get(op0_instr_idx);
   Instruction* last_vcc_wr = ctx.get(last_vcc_wr_idx);

   if ((op0_instr->opcode != aco_opcode::s_and_b64 /* wave64 */ &&
        op0_instr->opcode != aco_opcode::s_and_b32 /* wave32 */) ||
       op0_instr->operands[0].physReg() != vcc || op0_instr->operands[1].physReg() != exec ||
       !last_vcc_wr->isVOPC())
      return;

   assert(last_vcc_wr->definitions[0].tempId() == op0_instr->operands[0].tempId());

   /* Reduce the uses of the SCC def */
   ctx.uses[instr->operands[0].tempId()]--;
   /* Use VCC instead of SCC in the branch */
   instr->operands[0] = op0_instr->operands[0];
}

void
try_optimize_scc_nocompare(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; outputs SGPR and SCC if the SGPR != 0
    * s_cmp_eq_i32 s0, 0         ; comparison between the SGPR and 0
    * s_cbranch_scc0 BB3         ; use the result of the comparison, e.g. branch or cselect
    *
    * If possible, the above is optimized into:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; original instruction
    * s_cbranch_scc1 BB3         ; modified to use SCC directly rather than the SGPR with comparison
    *
    */

   if (!instr->isSALU() && !instr->isBranch())
      return;

   if (instr->isSOPC() &&
       (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
        instr->opcode == aco_opcode::s_cmp_lg_u32 || instr->opcode == aco_opcode::s_cmp_lg_i32 ||
        instr->opcode == aco_opcode::s_cmp_eq_u64 || instr->opcode == aco_opcode::s_cmp_lg_u64) &&
       (instr->operands[0].constantEquals(0) || instr->operands[1].constantEquals(0)) &&
       (instr->operands[0].isTemp() || instr->operands[1].isTemp())) {
      /* Make sure the constant is always in operand 1 */
      if (instr->operands[0].isConstant())
         std::swap(instr->operands[0], instr->operands[1]);

      if (ctx.uses[instr->operands[0].tempId()] > 1)
         return;

      /* Make sure both SCC and Operand 0 are written by the same instruction. */
      Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
      Idx sccwr_idx = last_writer_idx(ctx, scc, s1);
      if (!wr_idx.found() || wr_idx != sccwr_idx)
         return;

      Instruction* wr_instr = ctx.get(wr_idx);
      if (!wr_instr->isSALU() || wr_instr->definitions.size() < 2 ||
          wr_instr->definitions[1].physReg() != scc)
         return;

      /* Look for instructions which set SCC := (D != 0) */
      switch (wr_instr->opcode) {
      case aco_opcode::s_bfe_i32:
      case aco_opcode::s_bfe_i64:
      case aco_opcode::s_bfe_u32:
      case aco_opcode::s_bfe_u64:
      case aco_opcode::s_and_b32:
      case aco_opcode::s_and_b64:
      case aco_opcode::s_andn2_b32:
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_or_b32:
      case aco_opcode::s_or_b64:
      case aco_opcode::s_orn2_b32:
      case aco_opcode::s_orn2_b64:
      case aco_opcode::s_xor_b32:
      case aco_opcode::s_xor_b64:
      case aco_opcode::s_not_b32:
      case aco_opcode::s_not_b64:
      case aco_opcode::s_nor_b32:
      case aco_opcode::s_nor_b64:
      case aco_opcode::s_xnor_b32:
      case aco_opcode::s_xnor_b64:
      case aco_opcode::s_nand_b32:
      case aco_opcode::s_nand_b64:
      case aco_opcode::s_lshl_b32:
      case aco_opcode::s_lshl_b64:
      case aco_opcode::s_lshr_b32:
      case aco_opcode::s_lshr_b64:
      case aco_opcode::s_ashr_i32:
      case aco_opcode::s_ashr_i64:
      case aco_opcode::s_abs_i32:
      case aco_opcode::s_absdiff_i32: break;
      default: return;
      }

      /* Use the SCC def from wr_instr */
      ctx.uses[instr->operands[0].tempId()]--;
      instr->operands[0] = Operand(wr_instr->definitions[1].getTemp(), scc);
      ctx.uses[instr->operands[0].tempId()]++;

      /* Set the opcode and operand to 32-bit */
      instr->operands[1] = Operand::zero();
      instr->opcode =
         (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
          instr->opcode == aco_opcode::s_cmp_eq_u64)
            ? aco_opcode::s_cmp_eq_u32
            : aco_opcode::s_cmp_lg_u32;
   } else if ((instr->format == Format::PSEUDO_BRANCH && instr->operands.size() == 1 &&
               instr->operands[0].physReg() == scc) ||
              instr->opcode == aco_opcode::s_cselect_b32) {

      /* For cselect, operand 2 is the SCC condition */
      unsigned scc_op_idx = 0;
      if (instr->opcode == aco_opcode::s_cselect_b32) {
         scc_op_idx = 2;
      }

      Idx wr_idx = last_writer_idx(ctx, instr->operands[scc_op_idx]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);

      /* Check if we found the pattern above. */
      if (wr_instr->opcode != aco_opcode::s_cmp_eq_u32 &&
          wr_instr->opcode != aco_opcode::s_cmp_lg_u32)
         return;
      if (wr_instr->operands[0].physReg() != scc)
         return;
      if (!wr_instr->operands[1].constantEquals(0))
         return;

      /* The optimization can be unsafe when there are other users. */
      if (ctx.uses[instr->operands[scc_op_idx].tempId()] > 1)
         return;

      if (wr_instr->opcode == aco_opcode::s_cmp_eq_u32) {
         /* Flip the meaning of the instruction to correctly use the SCC. */
         if (instr->format == Format::PSEUDO_BRANCH)
            instr->opcode = instr->opcode == aco_opcode::p_cbranch_z ? aco_opcode::p_cbranch_nz
                                                                     : aco_opcode::p_cbranch_z;
         else if (instr->opcode == aco_opcode::s_cselect_b32)
            std::swap(instr->operands[0], instr->operands[1]);
         else
            unreachable(
               "scc_nocompare optimization is only implemented for p_cbranch and s_cselect");
      }

      /* Use the SCC def from the original instruction, not the comparison */
      ctx.uses[instr->operands[scc_op_idx].tempId()]--;
      instr->operands[scc_op_idx] = wr_instr->operands[0];
   }
}

void
try_combine_dpp(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * v_mov_dpp vA, vB, ...  ; move instruction with DPP
    * v_xxx vC, vA, ...      ; current instr that uses the result from the move
    *
    * If possible, the above is optimized into:
    *
    * v_xxx_dpp vC, vB, ...  ; current instr modified to use DPP directly
    *
    */

   if (!instr->isVALU() || instr->isDPP())
      return;

   for (unsigned i = 0; i < MIN2(2, instr->operands.size()); i++) {
      Idx op_instr_idx = last_writer_idx(ctx, instr->operands[i]);
      if (!op_instr_idx.found())
         continue;

      const Instruction* mov = ctx.get(op_instr_idx);
      if (mov->opcode != aco_opcode::v_mov_b32 || !mov->isDPP())
         continue;
      bool dpp8 = mov->isDPP8();
      if (!can_use_DPP(instr, false, dpp8))
         return;

      /* If we aren't going to remove the v_mov_b32, we have to ensure that it doesn't overwrite
       * its own operand before we use it.
       */
      if (mov->definitions[0].physReg() == mov->operands[0].physReg() &&
          (!mov->definitions[0].tempId() || ctx.uses[mov->definitions[0].tempId()] > 1))
         continue;

      /* Don't propagate DPP if the source register has been overwritten since the move. */
      if (is_clobbered_since(ctx, mov->operands[0], op_instr_idx))
         continue;

      if (i && !can_swap_operands(instr, &instr->opcode))
         continue;

      if (!dpp8) /* anything else doesn't make sense in SSA */
         assert(mov->dpp16().row_mask == 0xf && mov->dpp16().bank_mask == 0xf);

      if (--ctx.uses[mov->definitions[0].tempId()])
         ctx.uses[mov->operands[0].tempId()]++;

      convert_to_DPP(instr, dpp8);

      if (dpp8) {
         DPP8_instruction* dpp = &instr->dpp8();
         if (i) {
            std::swap(dpp->operands[0], dpp->operands[1]);
         }
         dpp->operands[0] = mov->operands[0];
         memcpy(dpp->lane_sel, mov->dpp8().lane_sel, sizeof(dpp->lane_sel));
      } else {
         DPP16_instruction* dpp = &instr->dpp16();
         if (i) {
            std::swap(dpp->operands[0], dpp->operands[1]);
            std::swap(dpp->neg[0], dpp->neg[1]);
            std::swap(dpp->abs[0], dpp->abs[1]);
         }
         dpp->operands[0] = mov->operands[0];
         dpp->dpp_ctrl = mov->dpp16().dpp_ctrl;
         dpp->bound_ctrl = true;
         dpp->neg[0] ^= mov->dpp16().neg[0] && !dpp->abs[0];
         dpp->abs[0] |= mov->dpp16().abs[0];
      }
      return;
   }
}

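/* Runs all post-RA peephole optimizations on one instruction, then records its register
 * writes so that later instructions can look them up.
 */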
void
process_instruction(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   try_apply_branch_vcc(ctx, instr);

   try_optimize_scc_nocompare(ctx, instr);

   try_combine_dpp(ctx, instr);

   if (instr)
      save_reg_writes(ctx, instr);

   ctx.current_instr_idx++;
}

} // namespace

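/* Peephole optimizations which can only be done after register allocation, when the
 * physical registers used by each instruction are known.
 */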
void
optimize_postRA(Program* program)
{
   pr_opt_ctx ctx;
   ctx.program = program;
   ctx.uses = dead_code_analysis(program);
   ctx.instr_idx_by_regs.resize(program->blocks.size());

   /* Forward pass
    * Goes through each instruction exactly once, and can transform
    * instructions or adjust the use counts of temps.
    */
   for (auto& block : program->blocks) {
      ctx.reset_block(&block);

      for (aco_ptr<Instruction>& instr : block.instructions)
         process_instruction(ctx, instr);
   }

   /* Cleanup pass
    * Gets rid of instructions which are manually deleted or
    * no longer have any uses.
    */
   for (auto& block : program->blocks) {
      auto new_end = std::remove_if(block.instructions.begin(), block.instructions.end(),
                                    [&ctx](const aco_ptr<Instruction>& instr)
                                    { return !instr || is_dead(ctx.uses, instr.get()); });
      block.instructions.resize(new_end - block.instructions.begin());
   }
}

} // namespace aco