/*
 * Copyright © 2021 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <array>
#include <bitset>
#include <vector>

namespace aco {
namespace {

constexpr const size_t max_reg_cnt = 512;
constexpr const size_t max_sgpr_cnt = 128;
constexpr const size_t min_vgpr = 256;
constexpr const size_t max_vgpr_cnt = 256;

struct Idx {
   bool operator==(const Idx& other) const { return block == other.block && instr == other.instr; }
   bool operator!=(const Idx& other) const { return !operator==(other); }

   bool found() const { return block != UINT32_MAX; }

   uint32_t block;
   uint32_t instr;
};

/** Indicates that a register was not yet written in the shader. */
Idx not_written_yet{UINT32_MAX, 0};

/** Indicates that an operand is constant or undefined, not written by any instruction. */
Idx const_or_undef{UINT32_MAX, 2};

/** Indicates that a register was overwritten by different instructions in previous blocks. */
Idx overwritten_untrackable{UINT32_MAX, 3};

/** Indicates that there isn't a clear single writer, for example due to subdword operations. */
Idx overwritten_unknown_instr{UINT32_MAX, 4};

struct pr_opt_ctx {
   using Idx_array = std::array<Idx, max_reg_cnt>;

   Program* program;
   Block* current_block;
   uint32_t current_instr_idx;
   std::vector<uint16_t> uses;
   std::unique_ptr<Idx_array[]> instr_idx_by_regs;

   pr_opt_ctx(Program* p)
       : program(p), current_block(nullptr), current_instr_idx(0), uses(dead_code_analysis(p)),
         instr_idx_by_regs(std::unique_ptr<Idx_array[]>{new Idx_array[p->blocks.size()]})
   {}

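   /* Initialize a block's per-register writer info from its predecessors:
    * copy from the first predecessor, then mark any register whose last
    * writer differs between predecessors as overwritten_untrackable.
    */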
   ALWAYS_INLINE void reset_block_regs(const Block::edge_vec& preds, const unsigned block_index,
                                       const unsigned min_reg, const unsigned num_regs)
   {
      const unsigned num_preds = preds.size();
      const unsigned first_pred = preds[0];

      /* Copy information from the first predecessor. */
      memcpy(&instr_idx_by_regs[block_index][min_reg], &instr_idx_by_regs[first_pred][min_reg],
             num_regs * sizeof(Idx));

      /* Mark overwritten if it doesn't match with other predecessors. */
      const unsigned until_reg = min_reg + num_regs;
      for (unsigned i = 1; i < num_preds; ++i) {
         unsigned pred = preds[i];
         for (unsigned reg = min_reg; reg < until_reg; ++reg) {
            Idx& idx = instr_idx_by_regs[block_index][reg];
            if (idx == overwritten_untrackable)
               continue;

            if (idx != instr_idx_by_regs[pred][reg])
               idx = overwritten_untrackable;
         }
      }
   }

   void reset_block(Block* block)
   {
      current_block = block;
      current_instr_idx = 0;

      if (block->linear_preds.empty()) {
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   not_written_yet);
      } else if (block->kind & block_kind_loop_header) {
         /* Instructions inside the loop may overwrite registers of temporaries that are
          * not live inside the loop, but we can't detect that because we haven't processed
          * the blocks in the loop yet. As a workaround, mark all registers as untrackable.
          * TODO: Consider improving this in the future.
          */
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   overwritten_untrackable);
      } else {
         reset_block_regs(block->linear_preds, block->index, 0, max_sgpr_cnt);
         reset_block_regs(block->linear_preds, block->index, 251, 3);

         if (!block->logical_preds.empty()) {
            /* We assume that VGPRs are only read by blocks which have a logical predecessor,
             * i.e. any block that reads any VGPR has at least 1 logical predecessor.
             */
            reset_block_regs(block->logical_preds, block->index, min_vgpr, max_vgpr_cnt);
         } else {
            /* If a block has no logical predecessors, it is not part of the
             * logical CFG and therefore it also won't have any logical successors.
             * Such a block does not write any VGPRs ever.
             */
            assert(block->logical_succs.empty());
         }
      }
   }

   Instruction* get(Idx idx) { return program->blocks[idx.block].instructions[idx.instr].get(); }
};

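/* Record this instruction as the last writer of every register its definitions
 * cover. Subdword definitions and pseudo scratch registers are recorded as
 * overwritten_unknown_instr because they can't be tracked precisely.
 */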
void
save_reg_writes(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   for (const Definition& def : instr->definitions) {
      assert(def.regClass().type() != RegType::sgpr || def.physReg().reg() <= 255);
      assert(def.regClass().type() != RegType::vgpr || def.physReg().reg() >= 256);

      unsigned dw_size = DIV_ROUND_UP(def.bytes(), 4u);
      unsigned r = def.physReg().reg();
      Idx idx{ctx.current_block->index, ctx.current_instr_idx};

      if (def.regClass().is_subdword())
         idx = overwritten_unknown_instr;

      assert((r + dw_size) <= max_reg_cnt);
      assert(def.size() == dw_size || def.regClass().is_subdword());
      std::fill(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size, idx);
   }
   if (instr->isPseudo() && instr->pseudo().needs_scratch_reg) {
      ctx.instr_idx_by_regs[ctx.current_block->index][instr->pseudo().scratch_sgpr] =
         overwritten_unknown_instr;
   }
}

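/* Return the stored writer Idx for the given register range; if its registers
 * were last written by different instructions, return overwritten_untrackable.
 */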
Idx
last_writer_idx(pr_opt_ctx& ctx, PhysReg physReg, RegClass rc)
{
   /* Verify that all of the operand's registers are written by the same instruction. */
   assert(physReg.reg() < max_reg_cnt);
   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][physReg.reg()];
   unsigned dw_size = DIV_ROUND_UP(rc.bytes(), 4u);
   unsigned r = physReg.reg();
   bool all_same =
      std::all_of(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                  ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size,
                  [instr_idx](Idx i) { return i == instr_idx; });

   return all_same ? instr_idx : overwritten_untrackable;
}

Idx
last_writer_idx(pr_opt_ctx& ctx, const Operand& op)
{
   if (op.isConstant() || op.isUndefined())
      return const_or_undef;

   return last_writer_idx(ctx, op.physReg(), op.regClass());
}

/**
 * Check whether a register has been overwritten since the given location.
 * This is an important part of checking whether certain optimizations are
 * valid.
 * Note that the decision is made based on registers and not on SSA IDs.
 */
bool
is_overwritten_since(pr_opt_ctx& ctx, PhysReg reg, RegClass rc, const Idx& since_idx,
                     bool inclusive = false)
{
   /* If we didn't find an instruction, assume that the register is overwritten. */
   if (!since_idx.found())
      return true;

   /* TODO: We currently can't keep track of subdword registers. */
   if (rc.is_subdword())
      return true;

   unsigned begin_reg = reg.reg();
   unsigned end_reg = begin_reg + rc.size();
   unsigned current_block_idx = ctx.current_block->index;

   for (unsigned r = begin_reg; r < end_reg; ++r) {
      Idx& i = ctx.instr_idx_by_regs[current_block_idx][r];
      if (i == overwritten_untrackable && current_block_idx > since_idx.block)
         return true;
      else if (i == overwritten_untrackable || i == not_written_yet)
         continue;
      else if (i == overwritten_unknown_instr)
         return true;

      assert(i.found());

      bool since_instr = inclusive ? i.instr >= since_idx.instr : i.instr > since_idx.instr;
      if (i.block > since_idx.block || (i.block == since_idx.block && since_instr))
         return true;
   }

   return false;
}

bool
is_overwritten_since(pr_opt_ctx& ctx, const Definition& def, const Idx& idx, bool inclusive = false)
{
   return is_overwritten_since(ctx, def.physReg(), def.regClass(), idx, inclusive);
}

bool
is_overwritten_since(pr_opt_ctx& ctx, const Operand& op, const Idx& idx, bool inclusive = false)
{
   if (op.isConstant())
      return false;

   return is_overwritten_since(ctx, op.physReg(), op.regClass(), idx, inclusive);
}

void
try_apply_branch_vcc(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * vcc = ...                      ; last_vcc_wr
    * sX, scc = s_and_bXX vcc, exec  ; op0_instr
    * (...vcc and exec must not be overwritten in between...)
    * s_cbranch_XX scc               ; instr
    *
    * If possible, the above is optimized into:
    *
    * vcc = ...                      ; last_vcc_wr
    * s_cbranch_XX vcc               ; instr modified to use vcc
    */

   /* Don't try to optimize this on GFX6-7 because SMEM may corrupt the vccz bit. */
   if (ctx.program->gfx_level < GFX8)
      return;

   if (instr->format != Format::PSEUDO_BRANCH || instr->operands.size() == 0 ||
       instr->operands[0].physReg() != scc)
      return;

   Idx op0_instr_idx = last_writer_idx(ctx, instr->operands[0]);
   Idx last_vcc_wr_idx = last_writer_idx(ctx, vcc, ctx.program->lane_mask);

   /* We need to make sure:
    * - the instructions that wrote the operand register and VCC are both found
    * - the operand register used by the branch, and VCC were both written in the current block
    * - EXEC hasn't been overwritten since the last VCC write
    * - VCC hasn't been overwritten since the operand register was written
    *   (i.e. the last VCC writer precedes the op0 writer)
    */
   if (!op0_instr_idx.found() || !last_vcc_wr_idx.found() ||
       op0_instr_idx.block != ctx.current_block->index ||
       last_vcc_wr_idx.block != ctx.current_block->index ||
       is_overwritten_since(ctx, exec, ctx.program->lane_mask, last_vcc_wr_idx) ||
       is_overwritten_since(ctx, vcc, ctx.program->lane_mask, op0_instr_idx))
      return;

   Instruction* op0_instr = ctx.get(op0_instr_idx);
   Instruction* last_vcc_wr = ctx.get(last_vcc_wr_idx);

   if ((op0_instr->opcode != aco_opcode::s_and_b64 /* wave64 */ &&
        op0_instr->opcode != aco_opcode::s_and_b32 /* wave32 */) ||
       op0_instr->operands[0].physReg() != vcc || op0_instr->operands[1].physReg() != exec ||
       !last_vcc_wr->isVOPC())
      return;

   assert(last_vcc_wr->definitions[0].tempId() == op0_instr->operands[0].tempId());

   /* Reduce the uses of the SCC def */
   ctx.uses[instr->operands[0].tempId()]--;
   /* Use VCC instead of SCC in the branch */
   instr->operands[0] = op0_instr->operands[0];
}

void
try_optimize_scc_nocompare(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; outputs SGPR and SCC if the SGPR != 0
    * s_cmp_eq_i32 s0, 0         ; comparison between the SGPR and 0
    * s_cbranch_scc0 BB3         ; use the result of the comparison, e.g. branch or cselect
    *
    * If possible, the above is optimized into:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; original instruction
    * s_cbranch_scc1 BB3         ; modified to use SCC directly rather than the SGPR with comparison
    *
    */

   if (!instr->isSALU() && !instr->isBranch())
      return;

   if (instr->isSOPC() &&
       (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
        instr->opcode == aco_opcode::s_cmp_lg_u32 || instr->opcode == aco_opcode::s_cmp_lg_i32 ||
        instr->opcode == aco_opcode::s_cmp_eq_u64 || instr->opcode == aco_opcode::s_cmp_lg_u64) &&
       (instr->operands[0].constantEquals(0) || instr->operands[1].constantEquals(0)) &&
       (instr->operands[0].isTemp() || instr->operands[1].isTemp())) {
      /* Make sure the constant is always in operand 1 */
      if (instr->operands[0].isConstant())
         std::swap(instr->operands[0], instr->operands[1]);

      /* Find the writer instruction of Operand 0. */
      Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);
      if (!wr_instr->isSALU() || wr_instr->definitions.size() < 2 ||
          wr_instr->definitions[1].physReg() != scc)
         return;

      /* Look for instructions which set SCC := (D != 0) */
      switch (wr_instr->opcode) {
      case aco_opcode::s_bfe_i32:
      case aco_opcode::s_bfe_i64:
      case aco_opcode::s_bfe_u32:
      case aco_opcode::s_bfe_u64:
      case aco_opcode::s_and_b32:
      case aco_opcode::s_and_b64:
      case aco_opcode::s_andn2_b32:
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_or_b32:
      case aco_opcode::s_or_b64:
      case aco_opcode::s_orn2_b32:
      case aco_opcode::s_orn2_b64:
      case aco_opcode::s_xor_b32:
      case aco_opcode::s_xor_b64:
      case aco_opcode::s_not_b32:
      case aco_opcode::s_not_b64:
      case aco_opcode::s_nor_b32:
      case aco_opcode::s_nor_b64:
      case aco_opcode::s_xnor_b32:
      case aco_opcode::s_xnor_b64:
      case aco_opcode::s_nand_b32:
      case aco_opcode::s_nand_b64:
      case aco_opcode::s_lshl_b32:
      case aco_opcode::s_lshl_b64:
      case aco_opcode::s_lshr_b32:
      case aco_opcode::s_lshr_b64:
      case aco_opcode::s_ashr_i32:
      case aco_opcode::s_ashr_i64:
      case aco_opcode::s_abs_i32:
      case aco_opcode::s_absdiff_i32: break;
      default: return;
      }

      /* Check whether both SCC and Operand 0 are written by the same instruction. */
      Idx sccwr_idx = last_writer_idx(ctx, scc, s1);
      if (wr_idx != sccwr_idx) {
         /* Proceed only if the SCC def of the writer is unused and the current
          * instruction is the only user of the writer's first definition.
          */
         if (ctx.uses[wr_instr->definitions[1].tempId()] ||
             ctx.uses[wr_instr->definitions[0].tempId()] > 1)
            return;

         /* Check whether the operands of the writer are overwritten. */
         for (const Operand& op : wr_instr->operands) {
            if (is_overwritten_since(ctx, op, wr_idx))
               return;
         }

         aco_opcode pulled_opcode = wr_instr->opcode;
         if (instr->opcode == aco_opcode::s_cmp_eq_u32 ||
             instr->opcode == aco_opcode::s_cmp_eq_i32 ||
             instr->opcode == aco_opcode::s_cmp_eq_u64) {
            /* When s_cmp_eq is used, it effectively inverts the SCC def.
             * However, we can't simply invert the opcodes here because that
             * would change the meaning of the program.
             */
            return;
         }

         Definition scc_def = instr->definitions[0];
         ctx.uses[wr_instr->definitions[0].tempId()]--;

         /* Copy the writer instruction, but use SCC from the current instr.
          * This means that the original instruction will be eliminated.
          */
         if (wr_instr->format == Format::SOP2) {
            instr.reset(create_instruction(pulled_opcode, Format::SOP2, 2, 2));
            instr->operands[1] = wr_instr->operands[1];
         } else if (wr_instr->format == Format::SOP1) {
            instr.reset(create_instruction(pulled_opcode, Format::SOP1, 1, 2));
         }
         instr->definitions[0] = wr_instr->definitions[0];
         instr->definitions[1] = scc_def;
         instr->operands[0] = wr_instr->operands[0];
         return;
      }

      /* Use the SCC def from wr_instr */
      ctx.uses[instr->operands[0].tempId()]--;
      instr->operands[0] = Operand(wr_instr->definitions[1].getTemp());
      instr->operands[0].setFixed(scc);
      ctx.uses[instr->operands[0].tempId()]++;

      /* Set the opcode and operand to 32-bit */
      instr->operands[1] = Operand::zero();
      instr->opcode =
         (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
          instr->opcode == aco_opcode::s_cmp_eq_u64)
            ? aco_opcode::s_cmp_eq_u32
            : aco_opcode::s_cmp_lg_u32;
   } else if ((instr->format == Format::PSEUDO_BRANCH && instr->operands.size() == 1 &&
               instr->operands[0].physReg() == scc) ||
              instr->opcode == aco_opcode::s_cselect_b32 ||
              instr->opcode == aco_opcode::s_cselect_b64) {

      /* For cselect, operand 2 is the SCC condition */
      unsigned scc_op_idx = 0;
      if (instr->opcode == aco_opcode::s_cselect_b32 ||
          instr->opcode == aco_opcode::s_cselect_b64) {
         scc_op_idx = 2;
      }

      Idx wr_idx = last_writer_idx(ctx, instr->operands[scc_op_idx]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);

      /* Check if we found the pattern above. */
      if (wr_instr->opcode != aco_opcode::s_cmp_eq_u32 &&
          wr_instr->opcode != aco_opcode::s_cmp_lg_u32)
         return;
      if (wr_instr->operands[0].physReg() != scc)
         return;
      if (!wr_instr->operands[1].constantEquals(0))
         return;

      /* The optimization can be unsafe when there are other users. */
      if (ctx.uses[instr->operands[scc_op_idx].tempId()] > 1)
         return;

      if (wr_instr->opcode == aco_opcode::s_cmp_eq_u32) {
         /* Flip the meaning of the instruction to correctly use the SCC. */
         if (instr->format == Format::PSEUDO_BRANCH)
            instr->opcode = instr->opcode == aco_opcode::p_cbranch_z ? aco_opcode::p_cbranch_nz
                                                                     : aco_opcode::p_cbranch_z;
         else if (instr->opcode == aco_opcode::s_cselect_b32 ||
                  instr->opcode == aco_opcode::s_cselect_b64)
            std::swap(instr->operands[0], instr->operands[1]);
         else
            unreachable(
               "scc_nocompare optimization is only implemented for p_cbranch and s_cselect");
      }

      /* Use the SCC def from the original instruction, not the comparison */
      ctx.uses[instr->operands[scc_op_idx].tempId()]--;
      instr->operands[scc_op_idx] = wr_instr->operands[0];
   }
}

static bool
is_scc_copy(const Instruction* instr)
{
   return instr->opcode == aco_opcode::p_parallelcopy && instr->operands.size() == 1 &&
          instr->operands[0].isTemp() && instr->operands[0].physReg().reg() == scc;
}

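/* For SCC copies, remember in pass_flags which instruction of the current block
 * wrote the copied SCC value, so try_eliminate_scc_copy can find the producer later.
 */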
void
save_scc_copy_producer(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   if (!is_scc_copy(instr.get()))
      return;

   Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
   if (wr_idx.found() && wr_idx.block == ctx.current_block->index)
      instr->pass_flags = wr_idx.instr;
   else
      instr->pass_flags = UINT32_MAX;
}

void
try_eliminate_scc_copy(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Try to eliminate an SCC copy by duplicating the instruction that produced the SCC. */
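   /* The pattern roughly looks like this:
    *
    * scc = <SALU producer>       ; producer_instr
    * sX  = p_parallelcopy scc    ; wr_instr, remembered by save_scc_copy_producer
    * ... SCC overwritten, but the producer's operands and defs are intact ...
    * scc = p_parallelcopy sX     ; instr, replaced by a copy of the producer
    */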

   if (instr->opcode != aco_opcode::p_parallelcopy || instr->definitions.size() != 1 ||
       instr->definitions[0].physReg().reg() != scc)
      return;

   /* Find the instruction that copied SCC into an SGPR. */
   Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
   if (!wr_idx.found())
      return;

   const Instruction* wr_instr = ctx.get(wr_idx);
   if (!is_scc_copy(wr_instr) || wr_instr->pass_flags == UINT32_MAX)
      return;

   Idx producer_idx = {wr_idx.block, wr_instr->pass_flags};
   Instruction* producer_instr = ctx.get(producer_idx);

   if (!producer_instr || !producer_instr->isSALU())
      return;

   /* Verify that the operands of the producer instruction haven't been overwritten. */
   for (const Operand& op : producer_instr->operands) {
      if (is_overwritten_since(ctx, op, producer_idx, true))
         return;
   }

   /* Verify that the definitions (except SCC) of the producer haven't been overwritten. */
   for (const Definition& def : producer_instr->definitions) {
      if (def.physReg().reg() == scc)
         continue;
      if (is_overwritten_since(ctx, def, producer_idx))
         return;
   }

   /* Duplicate the original producer of the SCC */
   Definition scc_def = instr->definitions[0];
   instr.reset(create_instruction(producer_instr->opcode, producer_instr->format,
                                  producer_instr->operands.size(),
                                  producer_instr->definitions.size()));
   instr->salu().imm = producer_instr->salu().imm;

   /* The copy is no longer needed. */
   if (--ctx.uses[wr_instr->definitions[0].tempId()] == 0)
      ctx.uses[wr_instr->operands[0].tempId()]--;

   /* Copy the operands of the original producer. */
   for (unsigned i = 0; i < producer_instr->operands.size(); ++i) {
      instr->operands[i] = producer_instr->operands[i];
      if (producer_instr->operands[i].isTemp() && !is_dead(ctx.uses, producer_instr))
         ctx.uses[producer_instr->operands[i].tempId()]++;
   }

   /* Copy the definitions of the original producer,
    * but mark them as non-temp to keep SSA quasi-intact.
    */
   for (unsigned i = 0; i < producer_instr->definitions.size(); ++i)
      instr->definitions[i] = Definition(producer_instr->definitions[i].physReg(),
                                         producer_instr->definitions[i].regClass());
   instr->definitions.back() = scc_def; /* Keep temporary ID. */
}

void
try_combine_dpp(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * v_mov_dpp vA, vB, ...      ; move instruction with DPP
    * v_xxx vC, vA, ...          ; current instr that uses the result from the move
    *
    * If possible, the above is optimized into:
    *
    * v_xxx_dpp vC, vB, ...      ; current instr modified to use DPP directly
    *
    */

   if (!instr->isVALU() || instr->isDPP())
      return;

   for (unsigned i = 0; i < instr->operands.size(); i++) {
      Idx op_instr_idx = last_writer_idx(ctx, instr->operands[i]);
      if (!op_instr_idx.found())
         continue;

      /* is_overwritten_since only considers active lanes when the register could possibly
       * have been overwritten from inactive lanes. Restrict this optimization to at most
       * one block so that there is no possibility for clobbered inactive lanes.
       */
      if (ctx.current_block->index - op_instr_idx.block > 1)
         continue;

      const Instruction* mov = ctx.get(op_instr_idx);
      if (mov->opcode != aco_opcode::v_mov_b32 || !mov->isDPP())
         continue;
      /* If we aren't going to remove the v_mov_b32, we have to ensure that it doesn't overwrite
       * its own operand before we use it.
       */
      if (mov->definitions[0].physReg() == mov->operands[0].physReg() &&
          (!mov->definitions[0].tempId() || ctx.uses[mov->definitions[0].tempId()] > 1))
         continue;

      /* Don't propagate DPP if the source register is overwritten since the move. */
      if (is_overwritten_since(ctx, mov->operands[0], op_instr_idx))
         continue;

      bool dpp8 = mov->isDPP8();

      /* Fetch-inactive means exec is ignored, which allows us to combine across exec changes. */
      if (!(dpp8 ? mov->dpp8().fetch_inactive : mov->dpp16().fetch_inactive) &&
          is_overwritten_since(ctx, Operand(exec, ctx.program->lane_mask), op_instr_idx))
         continue;

      /* We won't eliminate the DPP mov if the operand is used twice */
      bool op_used_twice = false;
      for (unsigned j = 0; j < instr->operands.size(); j++)
         op_used_twice |= i != j && instr->operands[i] == instr->operands[j];
      if (op_used_twice)
         continue;

      bool input_mods = can_use_input_modifiers(ctx.program->gfx_level, instr->opcode, i) &&
                        get_operand_size(instr, i) == 32;
      bool mov_uses_mods = mov->valu().neg[0] || mov->valu().abs[0];
      if (((dpp8 && ctx.program->gfx_level < GFX11) || !input_mods) && mov_uses_mods)
         continue;

      if (i != 0) {
         if (!can_swap_operands(instr, &instr->opcode, 0, i))
            continue;
         instr->valu().swapOperands(0, i);
      }

      if (!can_use_DPP(ctx.program->gfx_level, instr, dpp8))
         continue;

      if (!dpp8) /* anything else doesn't make sense in SSA */
         assert(mov->dpp16().row_mask == 0xf && mov->dpp16().bank_mask == 0xf);

      if (--ctx.uses[mov->definitions[0].tempId()])
         ctx.uses[mov->operands[0].tempId()]++;

      convert_to_DPP(ctx.program->gfx_level, instr, dpp8);

      instr->operands[0] = mov->operands[0];

      if (dpp8) {
         DPP8_instruction* dpp = &instr->dpp8();
         dpp->lane_sel = mov->dpp8().lane_sel;
         dpp->fetch_inactive = mov->dpp8().fetch_inactive;
         if (mov_uses_mods)
            instr->format = asVOP3(instr->format);
      } else {
         DPP16_instruction* dpp = &instr->dpp16();
         dpp->dpp_ctrl = mov->dpp16().dpp_ctrl;
         dpp->bound_ctrl = true;
         dpp->fetch_inactive = mov->dpp16().fetch_inactive;
      }
      instr->valu().neg[0] ^= mov->valu().neg[0] && !instr->valu().abs[0];
      instr->valu().abs[0] |= mov->valu().abs[0];
      return;
   }
}

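/* Number of operands that are encoded as ALU sources in the instruction word,
 * e.g. 2 for SOP2/SOPC/VOP2/VOPC and 1 for SOP1/VOP1. Used to limit which
 * operands try_reassign_split_vector may rename.
 */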
unsigned
num_encoded_alu_operands(const aco_ptr<Instruction>& instr)
{
   if (instr->isSALU()) {
      if (instr->isSOP2() || instr->isSOPC())
         return 2;
      else if (instr->isSOP1())
         return 1;

      return 0;
   }

   if (instr->isVALU()) {
      if (instr->isVOP1())
         return 1;
      else if (instr->isVOPC() || instr->isVOP2())
         return 2;
      else if (instr->opcode == aco_opcode::v_writelane_b32_e64 ||
               instr->opcode == aco_opcode::v_writelane_b32)
         return 2; /* potentially VOP3, but reads VDST as SRC2 */
      else if (instr->isVOP3() || instr->isVOP3P() || instr->isVINTERP_INREG())
         return instr->operands.size();
   }

   return 0;
}

void
try_reassign_split_vector(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Any unused split_vector definition can always use the same register
    * as the operand. This avoids creating unnecessary copies.
    */
   if (instr->opcode == aco_opcode::p_split_vector) {
      Operand& op = instr->operands[0];
      if (!op.isTemp() || op.isKill())
         return;

      PhysReg reg = op.physReg();
      for (Definition& def : instr->definitions) {
         if (def.getTemp().type() == op.getTemp().type() && def.isKill())
            def.setFixed(reg);

         reg = reg.advance(def.bytes());
      }

      return;
   }

   /* We are looking for the following pattern:
    *
    * sA, sB = p_split_vector s[X:Y]
    * ... X and Y not overwritten here ...
    * use sA or sB <--- current instruction
    *
    * If possible, we propagate the registers from the p_split_vector
    * operand into the current instruction and the above is optimized into:
    *
    * use sX or sY
    *
    * This may violate register assignment rules.
    * This optimization exists because it's too difficult to solve
    * in RA, and it should be removed once RA can handle it.
    */

   if (!instr->isVALU() && !instr->isSALU())
      return;

   for (unsigned i = 0; i < num_encoded_alu_operands(instr); i++) {
      /* Find the instruction that writes the current operand. */
      const Operand& op = instr->operands[i];
      Idx op_instr_idx = last_writer_idx(ctx, op);
      if (!op_instr_idx.found())
         continue;

      /* Check if the operand is written by p_split_vector. */
      Instruction* split_vec = ctx.get(op_instr_idx);
      if (split_vec->opcode != aco_opcode::p_split_vector &&
          split_vec->opcode != aco_opcode::p_extract_vector)
         continue;

      Operand& split_op = split_vec->operands[0];

      /* Don't do anything if the p_split_vector operand is not a temporary
       * or is killed by the p_split_vector.
       * In this case the definitions likely already reuse the same registers as the operand.
       */
      if (!split_op.isTemp() || split_op.isKill())
         continue;

      /* Only propagate operands of the same type */
      if (split_op.getTemp().type() != op.getTemp().type())
         continue;

      /* Check if the p_split_vector operand's registers are overwritten. */
      if (is_overwritten_since(ctx, split_op, op_instr_idx))
         continue;

      PhysReg reg = split_op.physReg();
      if (split_vec->opcode == aco_opcode::p_extract_vector) {
         reg =
            reg.advance(split_vec->definitions[0].bytes() * split_vec->operands[1].constantValue());
      }
      for (Definition& def : split_vec->definitions) {
         if (def.getTemp() != op.getTemp()) {
            reg = reg.advance(def.bytes());
            continue;
         }

         /* Don't propagate misaligned SGPRs.
          * Note: No ALU instruction can take a variable larger than 64 bits.
          */
         if (op.regClass() == s2 && reg.reg() % 2 != 0)
            break;

         /* Sub dword operands might need updates to SDWA/opsel,
          * but we only track full register writes at the moment.
          */
         assert(op.physReg().byte() == reg.byte());

         /* If there is only one use (left), recolor the split_vector definition */
         if (ctx.uses[op.tempId()] == 1)
            def.setFixed(reg);
         else
            ctx.uses[op.tempId()]--;

         /* Use the p_split_vector operand register directly.
          *
          * Note: this might violate register assignment rules to some extent
          *       in case the definition does not eventually get recolored.
          */
         instr->operands[i].setFixed(reg);
         break;
      }
   }
}

void
try_convert_fma_to_vop2(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We convert v_fma_f32 with an inline constant to fmamk/fmaak.
    * This is only beneficial if it allows more VOPD.
    */
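   /* e.g. when the constant sits in the third source, the inline constant
    * becomes the trailing literal:
    *
    * v_fma_f32 v0, v1, v2, 1.0  ->  v_fmaak_f32 v0, v1, v2, 0x3f800000
    */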
   if (ctx.program->gfx_level < GFX11 || ctx.program->wave_size != 32 ||
       instr->opcode != aco_opcode::v_fma_f32 || instr->usesModifiers())
      return;

   int constant_idx = -1;
   int vgpr_idx = -1;
   for (int i = 0; i < 3; i++) {
      const Operand& op = instr->operands[i];
      if (op.isConstant() && !op.isLiteral())
         constant_idx = i;
      else if (op.isOfType(RegType::vgpr))
         vgpr_idx = i;
      else
         return;
   }

   if (constant_idx < 0 || vgpr_idx < 0)
      return;

   std::swap(instr->operands[constant_idx], instr->operands[2]);
   if (constant_idx == 0 || vgpr_idx == 0)
      std::swap(instr->operands[0], instr->operands[1]);
   instr->operands[2] = Operand::literal32(instr->operands[2].constantValue());
   instr->opcode = constant_idx == 2 ? aco_opcode::v_fmaak_f32 : aco_opcode::v_fmamk_f32;
   instr->format = Format::VOP2;
}

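/* Check whether any definition of the instruction (or its scratch SGPR, for
 * pseudo instructions) overlaps the given register range.
 */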
bool
instr_overwrites(Instruction* instr, PhysReg reg, unsigned size)
{
   for (Definition def : instr->definitions) {
      if (def.physReg() + def.size() > reg && reg + size > def.physReg())
         return true;
   }
   if (instr->isPseudo() && instr->pseudo().needs_scratch_reg) {
      PhysReg scratch_reg = instr->pseudo().scratch_sgpr;
      if (scratch_reg >= reg && reg + size > scratch_reg)
         return true;
   }
   return false;
}

bool
try_insert_saveexec_out_of_loop(pr_opt_ctx& ctx, Block* block, Definition saved_exec,
                                unsigned saveexec_pos)
{
   /* This pattern can be created by try_optimize_branching_sequence:
    * BB1: // loop-header
    *    ...                              // nothing that clobbers s[0:1] or writes exec
    *    s[0:1] = p_parallelcopy exec     // we will move this
    *    exec = v_cmpx_...
    *    p_branch_z exec BB3, BB2
    * BB2:
    *    ...
    *    p_branch BB3
    * BB3:
    *    exec = p_parallelcopy s[0:1]     // exec and s[0:1] contain the same mask
    *    ...                              // nothing that clobbers s[0:1] or writes exec
    *    p_branch_nz scc BB1, BB4
    * BB4:
    *    ...
    *
    * If we know that the exec copy in the loop header is only needed in the
    * first iteration, it can be inserted into the preheader by adding a phi:
    *
    * BB1: // loop-header
    *    s[0:1] = p_linear_phi exec, s[0:1]
    *
    * will be lowered to a parallelcopy at the loop preheader.
    */
   if (block->linear_preds.size() != 2)
      return false;

   /* Check if exec is written, or the copy's dst overwritten in the loop header. */
   for (unsigned i = 0; i < saveexec_pos; i++) {
      if (!block->instructions[i])
         continue;
      if (block->instructions[i]->writes_exec())
         return false;
      if (instr_overwrites(block->instructions[i].get(), saved_exec.physReg(), saved_exec.size()))
         return false;
   }

   /* The register(s) must already contain the same value as exec in the continue block. */
   Block* cont = &ctx.program->blocks[block->linear_preds[1]];
   do {
      for (int i = cont->instructions.size() - 1; i >= 0; i--) {
         Instruction* instr = cont->instructions[i].get();
         if (instr->opcode == aco_opcode::p_parallelcopy && instr->definitions.size() == 1 &&
             instr->definitions[0].physReg() == exec &&
             instr->operands[0].physReg() == saved_exec.physReg()) {

            /* Insert after existing phis at the loop header because
             * the first phi might contain a valid scratch reg if needed.
             */
            auto it = std::find_if(block->instructions.begin(), block->instructions.end(),
                                   [](aco_ptr<Instruction>& phi) { return phi && !is_phi(phi); });

            Instruction* phi = create_instruction(aco_opcode::p_linear_phi, Format::PSEUDO, 2, 1);
            phi->definitions[0] = saved_exec;
            phi->operands[0] = Operand(exec, ctx.program->lane_mask);
            phi->operands[1] = instr->operands[0];
            block->instructions.emplace(it, phi);
            return true;
         }

         if (instr->writes_exec())
            return false;
         if (instr_overwrites(instr, saved_exec.physReg(), saved_exec.size()))
            return false;
      }
   } while (cont->linear_preds.size() == 1 && (cont = &ctx.program->blocks[cont->linear_preds[0]]));

   return false;
}

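/* Re-record the register writes of the instructions in [start, current_instr_idx),
 * e.g. after an instruction was inserted and the following indices shifted.
 */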
void
fixup_reg_writes(pr_opt_ctx& ctx, unsigned start)
{
   const unsigned current_idx = ctx.current_instr_idx;
   for (unsigned i = start; i < current_idx; i++) {
      ctx.current_instr_idx = i;
      if (ctx.current_block->instructions[i])
         save_reg_writes(ctx, ctx.current_block->instructions[i]);
   }

   ctx.current_instr_idx = current_idx;
}

bool
try_optimize_branching_sequence(pr_opt_ctx& ctx, aco_ptr<Instruction>& exec_copy)
{
   /* Try to optimize the branching sequence at the end of a block.
    *
    * We are looking for blocks that look like this:
    *
    * BB:
    * ... instructions ...
    * s[N:M] = <exec_val instruction>
    * ... other instructions that don't depend on exec ...
    * p_logical_end
    * exec = <exec_copy instruction> s[N:M]
    * p_cbranch exec
    *
    * The main motivation is to eliminate exec_copy.
    * Depending on the context, we try to do the following:
    *
    * 1. Reassign exec_val to write exec directly
    * 2. If possible, eliminate exec_copy
    * 3. When exec_copy also saves the old exec mask, insert a
    *    new copy instruction before exec_val
    * 4. Reassign any instruction that used s[N:M] to use exec
    *
    * This is beneficial for the following reasons:
    *
    * - Fewer instructions in the block when exec_copy can be eliminated
    * - As a result, when exec_val is VOPC this also improves the stalls
    *   due to SALU waiting for VALU. This works best when we can also
    *   remove the branching instruction, in which case the stall
    *   is entirely eliminated.
    * - When exec_copy can't be removed, the reassignment may still be
    *   very slightly beneficial to latency.
    */

   if (!exec_copy->writes_exec())
      return false;

   const aco_opcode and_saveexec = ctx.program->lane_mask == s2 ? aco_opcode::s_and_saveexec_b64
                                                                : aco_opcode::s_and_saveexec_b32;

   const aco_opcode s_and =
      ctx.program->lane_mask == s2 ? aco_opcode::s_and_b64 : aco_opcode::s_and_b32;

   const aco_opcode s_andn2 =
      ctx.program->lane_mask == s2 ? aco_opcode::s_andn2_b64 : aco_opcode::s_andn2_b32;

   if (exec_copy->opcode != and_saveexec && exec_copy->opcode != aco_opcode::p_parallelcopy &&
       (exec_copy->opcode != s_and || exec_copy->operands[1].physReg() != exec) &&
       (exec_copy->opcode != s_andn2 || exec_copy->operands[0].physReg() != exec))
      return false;

   const bool negate = exec_copy->opcode == s_andn2;
   const Operand& exec_copy_op = exec_copy->operands[negate];

   /* The SCC def of s_and/s_and_saveexec must be unused. */
   if (exec_copy->opcode != aco_opcode::p_parallelcopy && !exec_copy->definitions[1].isKill())
      return false;

   Idx exec_val_idx = last_writer_idx(ctx, exec_copy_op);
   if (!exec_val_idx.found() || exec_val_idx.block != ctx.current_block->index)
      return false;

   if (is_overwritten_since(ctx, exec, ctx.program->lane_mask, exec_val_idx)) {
      // TODO: in case nothing needs the previous exec mask, just remove it
      return false;
   }

   Instruction* exec_val = ctx.get(exec_val_idx);

   /* Only SALU instructions are allowed to have multiple definitions. */
   if (!exec_val->isSALU() && exec_val->definitions.size() > 1)
      return false;

   const bool vcmpx_exec_only = ctx.program->gfx_level >= GFX10;

   if (negate && !exec_val->isVOPC())
      return false;

   /* Check if a suitable v_cmpx opcode exists. */
   const aco_opcode v_cmpx_op =
      exec_val->isVOPC()
         ? (negate ? get_vcmpx(get_vcmp_inverse(exec_val->opcode)) : get_vcmpx(exec_val->opcode))
         : aco_opcode::num_opcodes;
   const bool vopc = v_cmpx_op != aco_opcode::num_opcodes;

   /* V_CMPX+DPP returns 0 with reads from disabled lanes, unlike V_CMP+DPP (RDNA3 ISA doc, 7.7) */
   if (vopc && exec_val->isDPP())
      return false;

   /* If s_and_saveexec is used, we'll need to insert a new instruction to save the old exec. */
   bool save_original_exec =
      exec_copy->opcode == and_saveexec && !exec_copy->definitions[0].isKill();

   const Definition exec_wr_def = exec_val->definitions[0];
   const Definition exec_copy_def = exec_copy->definitions[0];

   /* If we need to negate, the instruction has to be otherwise unused. */
   if (negate && ctx.uses[exec_copy_op.tempId()] != 1)
      return false;

   /* The copy can be removed when it kills its operand.
    * v_cmpx also writes the original destination pre GFX10.
    */
   const bool can_remove_copy = exec_copy_op.isKill() || (vopc && !vcmpx_exec_only);

   /* Always allow reassigning when the value is written by (usable) VOPC.
    * Note, VOPC implicitly contains "& exec" because it yields zero on inactive lanes.
    * Additionally, when the value is copied as-is, also allow SALU and parallelcopies.
    */
   const bool can_reassign =
      vopc || (exec_copy->opcode == aco_opcode::p_parallelcopy &&
               (exec_val->isSALU() || exec_val->opcode == aco_opcode::p_parallelcopy ||
                exec_val->opcode == aco_opcode::p_create_vector));

   /* The reassignment is not worth it when both the original exec needs to be copied
    * and the new exec copy can't be removed. In this case we'd end up with more instructions.
    */
   if (!can_reassign || (save_original_exec && !can_remove_copy))
      return false;

   /* Ensure that nothing needs a previous exec between exec_val_idx and the current exec write. */
   for (unsigned i = exec_val_idx.instr + 1; i < ctx.current_instr_idx; i++) {
      Instruction* instr = ctx.current_block->instructions[i].get();
      if (instr && needs_exec_mask(instr))
         return false;

      /* If the successor has phis, copies might have to be inserted at p_logical_end. */
      if (instr && instr->opcode == aco_opcode::p_logical_end &&
          ctx.current_block->logical_succs.size() == 1)
         return false;
   }

   /* When exec_val and exec_copy are non-adjacent, check whether there are any
    * instructions in between (besides p_logical_end) which may inhibit the optimization.
    */
   if (save_original_exec) {
      if (is_overwritten_since(ctx, exec_copy_def, exec_val_idx))
         return false;

      unsigned prev_wr_idx = ctx.current_instr_idx;
      if (exec_copy_op.physReg() == exec_copy_def.physReg()) {
         /* We'd overwrite the saved original exec */
         if (vopc && !vcmpx_exec_only)
            return false;

         /* Other instructions can use exec directly, so only check exec_val instr */
         prev_wr_idx = exec_val_idx.instr + 1;
      }
      /* Make sure that nothing else needs these registers in-between. */
      for (unsigned i = exec_val_idx.instr; i < prev_wr_idx; i++) {
         if (ctx.current_block->instructions[i]) {
            for (const Operand op : ctx.current_block->instructions[i]->operands) {
               if (op.physReg() + op.size() > exec_copy_def.physReg() &&
                   exec_copy_def.physReg() + exec_copy_def.size() > op.physReg())
                  return false;
            }
         }
      }
   }

   /* Reassign the instruction to write exec directly. */
   if (vopc) {
      /* Add one extra definition for exec and copy the VOP3-specific fields if present. */
      if (!vcmpx_exec_only) {
         if (exec_val->isSDWA()) {
            /* This might work but it needs testing and more code to copy the instruction. */
            return false;
         } else {
            Instruction* tmp =
               create_instruction(v_cmpx_op, exec_val->format, exec_val->operands.size(),
                                  exec_val->definitions.size() + 1);
            std::copy(exec_val->operands.cbegin(), exec_val->operands.cend(),
                      tmp->operands.begin());
            std::copy(exec_val->definitions.cbegin(), exec_val->definitions.cend(),
                      tmp->definitions.begin());

            VALU_instruction& src = exec_val->valu();
            VALU_instruction& dst = tmp->valu();
            dst.opsel = src.opsel;
            dst.omod = src.omod;
            dst.clamp = src.clamp;
            dst.neg = src.neg;
            dst.abs = src.abs;

            ctx.current_block->instructions[exec_val_idx.instr].reset(tmp);
            exec_val = ctx.get(exec_val_idx);
         }
      }

      /* Set v_cmpx opcode. */
      exec_val->opcode = v_cmpx_op;
      exec_val->definitions.back() = Definition(exec, ctx.program->lane_mask);

      /* Change instruction from VOP3 to plain VOPC when possible. */
      if (vcmpx_exec_only && !exec_val->usesModifiers() &&
          (exec_val->operands.size() < 2 || exec_val->operands[1].isOfType(RegType::vgpr)))
         exec_val->format = Format::VOPC;
   } else {
      exec_val->definitions[0] = Definition(exec, ctx.program->lane_mask);
   }
   for (unsigned i = 0; i < ctx.program->lane_mask.size(); i++)
      ctx.instr_idx_by_regs[ctx.current_block->index][exec + i] =
         ctx.instr_idx_by_regs[ctx.current_block->index][exec_copy_op.physReg() + i];

   /* If there are other instructions (besides p_logical_end) between
    * writing the value and copying it to exec, reassign uses
    * of the old definition.
    */
   Temp exec_temp = exec_copy_op.getTemp();
   for (unsigned i = exec_val_idx.instr + 1; i < ctx.current_instr_idx; i++) {
      if (ctx.current_block->instructions[i]) {
         for (Operand& op : ctx.current_block->instructions[i]->operands) {
            if (op.isTemp() && op.getTemp() == exec_temp) {
               op = Operand(exec, op.regClass());
               ctx.uses[exec_temp.id()]--;
            }
         }
      }
   }

   if (can_remove_copy) {
      /* Remove the copy. */
      exec_copy.reset();
      ctx.uses[exec_temp.id()]--;
   } else {
      /* Reassign the copy to write the register of the original value. */
      exec_copy.reset(create_instruction(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1));
      exec_copy->definitions[0] = exec_wr_def;
      exec_copy->operands[0] = Operand(exec, ctx.program->lane_mask);
   }

   if (save_original_exec) {
      /* Insert a new instruction that saves the original exec before it is overwritten.
       * Do this last, because inserting in the instructions vector may invalidate the exec_val
       * reference.
       */
      if (ctx.current_block->kind & block_kind_loop_header) {
         if (try_insert_saveexec_out_of_loop(ctx, ctx.current_block, exec_copy_def,
                                             exec_val_idx.instr)) {
            /* We inserted something after the last phi, so fixup indices from the start. */
            fixup_reg_writes(ctx, 0);
            return true;
         }
      }
      Instruction* copy = create_instruction(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1);
      copy->definitions[0] = exec_copy_def;
      copy->operands[0] = Operand(exec, ctx.program->lane_mask);
      auto it = std::next(ctx.current_block->instructions.begin(), exec_val_idx.instr);
      ctx.current_block->instructions.emplace(it, copy);

      /* Fixup indices after inserting an instruction. */
      fixup_reg_writes(ctx, exec_val_idx.instr);
      return true;
   }

   return true;
}

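/* Mark a p_cbranch_z exec branch as never taken when exec is known to be a
 * non-zero constant, so the branch can be removed later.
 */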
void
try_skip_const_branch(pr_opt_ctx& ctx, aco_ptr<Instruction>& branch)
{
   if (branch->opcode != aco_opcode::p_cbranch_z || branch->operands[0].physReg() != exec)
      return;
   if (branch->branch().never_taken)
      return;

   Idx exec_val_idx = last_writer_idx(ctx, branch->operands[0]);
   if (!exec_val_idx.found())
      return;

   Instruction* exec_val = ctx.get(exec_val_idx);
   if ((exec_val->opcode == aco_opcode::p_parallelcopy && exec_val->operands.size() == 1) ||
       exec_val->opcode == aco_opcode::p_create_vector) {
      /* Remove the branch instruction when exec is constant non-zero. */
      bool is_const_val = std::any_of(exec_val->operands.begin(), exec_val->operands.end(),
                                      [](const Operand& op) -> bool
                                      { return op.isConstant() && op.constantValue(); });
      branch->branch().never_taken |= is_const_val;
   }
}

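/* Run all optimizations on a single instruction, then record its register
 * writes and advance to the next instruction of the block.
 */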
void
process_instruction(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Don't try to optimize instructions which are already dead. */
   if (!instr || is_dead(ctx.uses, instr.get())) {
      instr.reset();
      ctx.current_instr_idx++;
      return;
   }
   if (try_optimize_branching_sequence(ctx, instr))
      return;

   try_apply_branch_vcc(ctx, instr);

   try_optimize_scc_nocompare(ctx, instr);

   try_combine_dpp(ctx, instr);

   try_reassign_split_vector(ctx, instr);

   try_convert_fma_to_vop2(ctx, instr);

   try_eliminate_scc_copy(ctx, instr);

   save_scc_copy_producer(ctx, instr);

   save_reg_writes(ctx, instr);

   ctx.current_instr_idx++;
}

} // namespace

void
optimize_postRA(Program* program)
{
   pr_opt_ctx ctx(program);

   /* Forward pass
    * Goes through each instruction exactly once, and can transform
    * instructions or adjust the use counts of temps.
    */
   for (auto& block : program->blocks) {
      ctx.reset_block(&block);

      while (ctx.current_instr_idx < block.instructions.size()) {
         aco_ptr<Instruction>& instr = block.instructions[ctx.current_instr_idx];
         process_instruction(ctx, instr);
      }

      try_skip_const_branch(ctx, block.instructions.back());
   }

   /* Cleanup pass
    * Gets rid of instructions which are manually deleted or
    * no longer have any uses.
    */
   for (auto& block : program->blocks) {
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());

      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (!instr || is_dead(ctx.uses, instr.get()))
            continue;

         instructions.emplace_back(std::move(instr));
      }

      block.instructions = std::move(instructions);
   }
}

} // namespace aco