/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"

#include <algorithm>
#include <map>
#include <vector>

namespace aco {
namespace {

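/* One copy implied by a phi: the phi's definition paired with the operand
 * incoming from a particular predecessor. */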
struct phi_info_item {
   Definition def;
   Operand op;
};

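/* Pass-wide state: per-predecessor phi copy lists, per-block emptiness flags
 * used by jump threading, and flags that tell whether a block's incoming exec
 * mask is still used. */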
struct ssa_elimination_ctx {
   /* The outer vectors should be indexed by block index. The inner vectors store phi information
    * for each block. */
   std::vector<std::vector<phi_info_item>> logical_phi_info;
   std::vector<std::vector<phi_info_item>> linear_phi_info;
   std::vector<bool> empty_blocks;
   std::vector<bool> blocks_incoming_exec_used;
   Program* program;

   ssa_elimination_ctx(Program* program_)
       : logical_phi_info(program_->blocks.size()), linear_phi_info(program_->blocks.size()),
         empty_blocks(program_->blocks.size(), true),
         blocks_incoming_exec_used(program_->blocks.size(), true), program(program_)
   {}
};

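/* For each phi, record in every predecessor block the copy (definition, operand)
 * that must be emitted there. Operands that are undefined or already placed in
 * the destination register need no copy. */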
void
collect_phi_info(ssa_elimination_ctx& ctx)
{
   for (Block& block : ctx.program->blocks) {
      for (aco_ptr<Instruction>& phi : block.instructions) {
         if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
            break;

         for (unsigned i = 0; i < phi->operands.size(); i++) {
            if (phi->operands[i].isUndefined())
               continue;
            if (phi->operands[i].physReg() == phi->definitions[0].physReg())
               continue;

            assert(phi->definitions[0].size() == phi->operands[i].size());

            std::vector<unsigned>& preds =
               phi->opcode == aco_opcode::p_phi ? block.logical_preds : block.linear_preds;
            uint32_t pred_idx = preds[i];
            auto& info_vec = phi->opcode == aco_opcode::p_phi ? ctx.logical_phi_info[pred_idx]
                                                              : ctx.linear_phi_info[pred_idx];
            info_vec.push_back({phi->definitions[0], phi->operands[i]});
            ctx.empty_blocks[pred_idx] = false;
         }
      }
   }
}

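/* Lower the collected phi copies to p_parallelcopy instructions: copies from
 * logical phis go right before p_logical_end, copies from linear phis right
 * before the final branch, whose definition doubles as the scratch SGPR. */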
void
insert_parallelcopies(ssa_elimination_ctx& ctx)
{
   /* insert the parallelcopies from logical phis before p_logical_end */
   for (unsigned block_idx = 0; block_idx < ctx.program->blocks.size(); ++block_idx) {
      auto& logical_phi_info = ctx.logical_phi_info[block_idx];
      if (logical_phi_info.empty())
         continue;

      Block& block = ctx.program->blocks[block_idx];
      unsigned idx = block.instructions.size() - 1;
      while (block.instructions[idx]->opcode != aco_opcode::p_logical_end) {
         assert(idx > 0);
         idx--;
      }

      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block.instructions.begin(), idx);
      aco_ptr<Pseudo_instruction> pc{
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO,
                                                logical_phi_info.size(), logical_phi_info.size())};
      unsigned i = 0;
      for (auto& phi_info : logical_phi_info) {
         pc->definitions[i] = phi_info.def;
         pc->operands[i] = phi_info.op;
         i++;
      }
      /* this shouldn't be needed since we're only copying vgprs */
      pc->tmp_in_scc = false;
      block.instructions.insert(it, std::move(pc));
   }

   /* insert parallelcopies for the linear phis at the end of blocks just before the branch */
   for (unsigned block_idx = 0; block_idx < ctx.program->blocks.size(); ++block_idx) {
      auto& linear_phi_info = ctx.linear_phi_info[block_idx];
      if (linear_phi_info.empty())
         continue;

      Block& block = ctx.program->blocks[block_idx];
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.end();
      --it;
      assert((*it)->isBranch());
      PhysReg scratch_sgpr = (*it)->definitions[0].physReg();
      aco_ptr<Pseudo_instruction> pc{
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO,
                                                linear_phi_info.size(), linear_phi_info.size())};
      unsigned i = 0;
      for (auto& phi_info : linear_phi_info) {
         pc->definitions[i] = phi_info.def;
         pc->operands[i] = phi_info.op;
         i++;
      }
      pc->tmp_in_scc = block.scc_live_out;
      pc->scratch_sgpr = scratch_sgpr;
      block.instructions.insert(it, std::move(pc));
   }
}

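/* A block counts as empty when it only contains phis, logical markers, branches
 * and no-op parallelcopies. Writes to exec can optionally be ignored. */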
bool
is_empty_block(Block* block, bool ignore_exec_writes)
{
   /* check if this block is empty and the exec mask is not needed */
   for (aco_ptr<Instruction>& instr : block->instructions) {
      switch (instr->opcode) {
      case aco_opcode::p_linear_phi:
      case aco_opcode::p_phi:
      case aco_opcode::p_logical_start:
      case aco_opcode::p_logical_end:
      case aco_opcode::p_branch: break;
      case aco_opcode::p_parallelcopy:
         for (unsigned i = 0; i < instr->definitions.size(); i++) {
            if (ignore_exec_writes && instr->definitions[i].physReg() == exec)
               continue;
            if (instr->definitions[i].physReg() != instr->operands[i].physReg())
               return false;
         }
         break;
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_andn2_b32:
         if (ignore_exec_writes && instr->definitions[0].physReg() == exec)
            break;
         return false;
      default: return false;
      }
   }
   return true;
}

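/* Strip an empty block down to its branch when its single successor is a merge
 * block, which restores exec anyway. */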
void
try_remove_merge_block(ssa_elimination_ctx& ctx, Block* block)
{
   /* check if the successor is another merge block which restores exec */
   // TODO: divergent loops also restore exec
   if (block->linear_succs.size() != 1 ||
       !(ctx.program->blocks[block->linear_succs[0]].kind & block_kind_merge))
      return;

   /* check if this block is empty */
   if (!is_empty_block(block, true))
      return;

   /* keep the branch instruction and remove the rest */
   aco_ptr<Instruction> branch = std::move(block->instructions.back());
   block->instructions.clear();
   block->instructions.emplace_back(std::move(branch));
}

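/* Remove an invert block whose two successors have collapsed into one (because
 * the original successor was removed) by rewiring both predecessors directly to
 * the successor. */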
void
try_remove_invert_block(ssa_elimination_ctx& ctx, Block* block)
{
   assert(block->linear_succs.size() == 2);
   /* only remove this block if the successor got removed as well */
   if (block->linear_succs[0] != block->linear_succs[1])
      return;

   /* check if block is otherwise empty */
   if (!is_empty_block(block, true))
      return;

   unsigned succ_idx = block->linear_succs[0];
   assert(block->linear_preds.size() == 2);
   for (unsigned i = 0; i < 2; i++) {
      Block* pred = &ctx.program->blocks[block->linear_preds[i]];
      pred->linear_succs[0] = succ_idx;
      ctx.program->blocks[succ_idx].linear_preds[i] = pred->index;

      Pseudo_branch_instruction& branch = pred->instructions.back()->branch();
      assert(branch.isBranch());
      branch.target[0] = succ_idx;
      branch.target[1] = succ_idx;
   }

   block->instructions.clear();
   block->linear_preds.clear();
   block->linear_succs.clear();
}

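/* Remove an empty block with a single linear predecessor and successor by
 * retargeting the predecessor's branch, inverting the branch condition when the
 * fall-through path requires it. */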
void
try_remove_simple_block(ssa_elimination_ctx& ctx, Block* block)
{
   if (!is_empty_block(block, false))
      return;

   Block& pred = ctx.program->blocks[block->linear_preds[0]];
   Block& succ = ctx.program->blocks[block->linear_succs[0]];
   Pseudo_branch_instruction& branch = pred.instructions.back()->branch();
   if (branch.opcode == aco_opcode::p_branch) {
      branch.target[0] = succ.index;
      branch.target[1] = succ.index;
   } else if (branch.target[0] == block->index) {
      branch.target[0] = succ.index;
   } else if (branch.target[0] == succ.index) {
      assert(branch.target[1] == block->index);
      branch.target[1] = succ.index;
      branch.opcode = aco_opcode::p_branch;
   } else if (branch.target[1] == block->index) {
      /* check if there is a fall-through path from block to succ */
      bool falls_through = block->index < succ.index;
      for (unsigned j = block->index + 1; falls_through && j < succ.index; j++) {
         assert(ctx.program->blocks[j].index == j);
         if (!ctx.program->blocks[j].instructions.empty())
            falls_through = false;
      }
      if (falls_through) {
         branch.target[1] = succ.index;
      } else {
         /* check if there is a fall-through path for the alternative target */
         if (block->index >= branch.target[0])
            return;
         for (unsigned j = block->index + 1; j < branch.target[0]; j++) {
            if (!ctx.program->blocks[j].instructions.empty())
               return;
         }

         /* This is a (uniform) break or continue block. The branch condition has to be inverted. */
         if (branch.opcode == aco_opcode::p_cbranch_z)
            branch.opcode = aco_opcode::p_cbranch_nz;
         else if (branch.opcode == aco_opcode::p_cbranch_nz)
            branch.opcode = aco_opcode::p_cbranch_z;
         else
            assert(false);
         /* also invert the linear successors */
         pred.linear_succs[0] = pred.linear_succs[1];
         pred.linear_succs[1] = succ.index;
         branch.target[1] = branch.target[0];
         branch.target[0] = succ.index;
      }
   } else {
      assert(false);
   }

   if (branch.target[0] == branch.target[1]) {
      while (branch.operands.size())
         branch.operands.pop_back();

      branch.opcode = aco_opcode::p_branch;
   }

   for (unsigned i = 0; i < pred.linear_succs.size(); i++)
      if (pred.linear_succs[i] == block->index)
         pred.linear_succs[i] = succ.index;

   for (unsigned i = 0; i < succ.linear_preds.size(); i++)
      if (succ.linear_preds[i] == block->index)
         succ.linear_preds[i] = pred.index;

   block->instructions.clear();
   block->linear_preds.clear();
   block->linear_succs.clear();
}

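/* Check whether the instruction writes any part of the exec mask. */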
bool
instr_writes_exec(Instruction* instr)
{
   for (Definition& def : instr->definitions)
      if (def.physReg() == exec || def.physReg() == exec_hi)
         return true;

   return false;
}

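/* Register ranges [lo, lo + size) intersect iff each starts below the other's end. */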
template <typename T, typename U>
bool
regs_intersect(const T& a, const U& b)
{
   const unsigned a_lo = a.physReg();
   const unsigned a_hi = a_lo + a.size();
   const unsigned b_lo = b.physReg();
   const unsigned b_hi = b_lo + b.size();

   return a_hi > b_lo && b_hi > a_lo;
}

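/* exec_val_idx and exec_copy_idx are found by eliminate_useless_exec_writes_in_block:
 * they point at the instruction producing the branch condition and at the copy that
 * moves it into exec. */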
void
try_optimize_branching_sequence(ssa_elimination_ctx& ctx, Block& block, const int exec_val_idx,
                                const int exec_copy_idx)
{
   /* Try to optimize the branching sequence at the end of a block.
    *
    * We are looking for blocks that look like this:
    *
    * BB:
    * ... instructions ...
    * s[N:M] = <exec_val instruction>
    * ... other instructions that don't depend on exec ...
    * p_logical_end
    * exec = <exec_copy instruction> s[N:M]
    * p_cbranch exec
    *
    * The main motivation is to eliminate exec_copy.
    * Depending on the context, we try to do the following:
    *
    * 1. Reassign exec_val to write exec directly
    * 2. If possible, eliminate exec_copy
    * 3. When exec_copy also saves the old exec mask, insert a
    *    new copy instruction before exec_val
    * 4. Reassign any instruction that used s[N:M] to use exec
    *
    * This is beneficial for the following reasons:
    *
    * - Fewer instructions in the block when exec_copy can be eliminated
    * - As a result, when exec_val is VOPC this also improves the stalls
    *   due to SALU waiting for VALU. This works best when we can also
    *   remove the branching instruction, in which case the stall
    *   is entirely eliminated.
    * - When exec_copy can't be removed, the reassignment may still be
    *   very slightly beneficial to latency.
    */

   aco_ptr<Instruction>& exec_val = block.instructions[exec_val_idx];
   aco_ptr<Instruction>& exec_copy = block.instructions[exec_copy_idx];

   const aco_opcode and_saveexec = ctx.program->lane_mask == s2 ? aco_opcode::s_and_saveexec_b64
                                                                : aco_opcode::s_and_saveexec_b32;

   if (exec_copy->opcode != and_saveexec && exec_copy->opcode != aco_opcode::p_parallelcopy)
      return;

   if (exec_val->definitions.size() > 1)
      return;

   const bool vcmpx_exec_only = ctx.program->gfx_level >= GFX10;

   /* Check if a suitable v_cmpx opcode exists. */
   const aco_opcode v_cmpx_op =
      exec_val->isVOPC() ? get_vcmpx(exec_val->opcode) : aco_opcode::num_opcodes;
   const bool vopc = v_cmpx_op != aco_opcode::num_opcodes;

   /* V_CMPX+DPP returns 0 with reads from disabled lanes, unlike V_CMP+DPP (RDNA3 ISA doc, 7.7) */
   if (vopc && exec_val->isDPP())
      return;

   /* If s_and_saveexec is used, we'll need to insert a new instruction to save the old exec. */
   bool save_original_exec = exec_copy->opcode == and_saveexec;

   const Definition exec_wr_def = exec_val->definitions[0];
   const Definition exec_copy_def = exec_copy->definitions[0];

   if (save_original_exec) {
      for (int i = exec_copy_idx - 1; i >= 0; i--) {
         const aco_ptr<Instruction>& instr = block.instructions[i];
         if (instr->opcode == aco_opcode::p_parallelcopy &&
             instr->definitions[0].physReg() == exec &&
             instr->definitions[0].regClass() == ctx.program->lane_mask &&
             instr->operands[0].physReg() == exec_copy_def.physReg()) {
            /* The register that we should save exec to already contains the same value as exec. */
            save_original_exec = false;
            break;
         }
         /* exec_copy_def is clobbered or exec written before we found a copy. */
         if ((i != exec_val_idx || !vcmpx_exec_only) &&
             std::any_of(instr->definitions.begin(), instr->definitions.end(),
                         [&exec_copy_def, &ctx](const Definition& def) -> bool
                         {
                            return regs_intersect(exec_copy_def, def) ||
                                   regs_intersect(Definition(exec, ctx.program->lane_mask), def);
                         }))
            break;
      }
   }

   /* Position where the original exec mask copy should be inserted. */
   const int save_original_exec_idx = exec_val_idx;
   /* The copy can be removed when it kills its operand.
    * v_cmpx also writes the original destination pre GFX10.
    */
   const bool can_remove_copy = exec_copy->operands[0].isKill() || (vopc && !vcmpx_exec_only);

   /* Always allow reassigning when the value is written by (usable) VOPC.
    * Note, VOPC implicitly contains "& exec" because it yields zero on inactive lanes.
    * Additionally, when value is copied as-is, also allow SALU and parallelcopies.
    */
   const bool can_reassign =
      vopc || (exec_copy->opcode == aco_opcode::p_parallelcopy &&
               (exec_val->isSALU() || exec_val->opcode == aco_opcode::p_parallelcopy));

   /* The reassignment is not worth it when both the original exec needs to be copied
    * and the new exec copy can't be removed. In this case we'd end up with more instructions.
    */
   if (!can_reassign || (save_original_exec && !can_remove_copy))
      return;

   /* When exec_val and exec_copy are non-adjacent, check whether there are any
    * instructions in between (besides p_logical_end) which may inhibit the optimization.
    */
   for (int idx = exec_val_idx + 1; idx < exec_copy_idx; ++idx) {
      aco_ptr<Instruction>& instr = block.instructions[idx];

      if (save_original_exec) {
         /* Check if the instruction uses the exec_copy_def register, in which case we can't
          * optimize. */
         for (const Operand& op : instr->operands)
            if (regs_intersect(exec_copy_def, op))
               return;
         for (const Definition& def : instr->definitions)
            if (regs_intersect(exec_copy_def, def))
               return;
      }

      /* Check if the instruction may implicitly read VCC, e.g. v_cndmask or add with carry.
       * Rewriting these operands may require format conversion because of encoding limitations.
       */
      if (exec_wr_def.physReg() == vcc && instr->isVALU() && instr->operands.size() >= 3 &&
          !instr->isVOP3())
         return;
   }

   if (save_original_exec) {
      /* We insert the exec copy before exec_val, so exec_val can't use those registers. */
      for (const Operand& op : exec_val->operands)
         if (regs_intersect(exec_copy_def, op))
            return;
      /* We would write over the saved exec value in this case. */
      if (((vopc && !vcmpx_exec_only) || !can_remove_copy) &&
          regs_intersect(exec_copy_def, exec_wr_def))
         return;
   }

   if (vopc) {
      /* Add one extra definition for exec and copy the VOP3-specific fields if present. */
      if (!vcmpx_exec_only) {
         if (exec_val->isSDWA()) {
            /* This might work but it needs testing and more code to copy the instruction. */
            return;
         } else {
            aco_ptr<Instruction> tmp = std::move(exec_val);
            exec_val.reset(create_instruction<VALU_instruction>(
               tmp->opcode, tmp->format, tmp->operands.size(), tmp->definitions.size() + 1));
            std::copy(tmp->operands.cbegin(), tmp->operands.cend(), exec_val->operands.begin());
            std::copy(tmp->definitions.cbegin(), tmp->definitions.cend(),
                      exec_val->definitions.begin());

            VALU_instruction& src = tmp->valu();
            VALU_instruction& dst = exec_val->valu();
            dst.opsel = src.opsel;
            dst.omod = src.omod;
            dst.clamp = src.clamp;
            dst.neg = src.neg;
            dst.abs = src.abs;
         }
      }

      /* Set v_cmpx opcode. */
      exec_val->opcode = v_cmpx_op;

      *exec_val->definitions.rbegin() = Definition(exec, ctx.program->lane_mask);

      /* Change instruction from VOP3 to plain VOPC when possible. */
      if (vcmpx_exec_only && !exec_val->usesModifiers() &&
          (exec_val->operands.size() < 2 || exec_val->operands[1].isOfType(RegType::vgpr)))
         exec_val->format = Format::VOPC;
   } else {
      /* Reassign the instruction to write exec directly. */
      exec_val->definitions[0] = Definition(exec, ctx.program->lane_mask);
   }

   /* If there are other instructions (besides p_logical_end) between
    * writing the value and copying it to exec, reassign uses
    * of the old definition.
    */
   for (int idx = exec_val_idx + 1; idx < exec_copy_idx; ++idx) {
      aco_ptr<Instruction>& instr = block.instructions[idx];
      for (Operand& op : instr->operands) {
         if (op.physReg() == exec_wr_def.physReg())
            op = Operand(exec, op.regClass());
         if (exec_wr_def.size() == 2 && op.physReg() == exec_wr_def.physReg().advance(4))
            op = Operand(exec_hi, op.regClass());
      }
   }

   if (can_remove_copy) {
      /* Remove the copy. */
      exec_copy.reset();
   } else {
      /* Reassign the copy to write the register of the original value. */
      exec_copy.reset(
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1));
      exec_copy->definitions[0] = exec_wr_def;
      exec_copy->operands[0] = Operand(exec, ctx.program->lane_mask);
   }

   if (exec_val->opcode == aco_opcode::p_parallelcopy && exec_val->operands[0].isConstant() &&
       exec_val->operands[0].constantValue()) {
      /* Remove the branch instruction when exec is constant non-zero. */
      aco_ptr<Instruction>& branch = block.instructions.back();
      if (branch->opcode == aco_opcode::p_cbranch_z && branch->operands[0].physReg() == exec)
         block.instructions.back().reset();
   }

   if (save_original_exec) {
      /* Insert a new instruction that saves the original exec before it is overwritten.
       * Do this last, because inserting in the instructions vector may invalidate the exec_val
       * reference.
       */
      const auto it = std::next(block.instructions.begin(), save_original_exec_idx);
      aco_ptr<Instruction> copy(
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1));
      copy->definitions[0] = exec_copy_def;
      copy->operands[0] = Operand(exec, ctx.program->lane_mask);
      block.instructions.insert(it, std::move(copy));
   }
}

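/* Walk the block backwards, deleting exec writes that no later instruction or
 * successor uses, and locate the exec_val/exec_copy pair for
 * try_optimize_branching_sequence. */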
void
eliminate_useless_exec_writes_in_block(ssa_elimination_ctx& ctx, Block& block)
{
   /* Check if any successor needs the outgoing exec mask from the current block. */

   bool exec_write_used;
   if (block.kind & block_kind_end_with_regs) {
      /* The last block of a program that is followed by another shader part
       * must preserve its final exec write. */
      exec_write_used = true;
   } else {
      bool copy_to_exec = false;
      bool copy_from_exec = false;

      for (const auto& successor_phi_info : ctx.linear_phi_info[block.index]) {
         copy_to_exec |= successor_phi_info.def.physReg() == exec;
         copy_from_exec |= successor_phi_info.op.physReg() == exec;
      }

      if (copy_from_exec)
         exec_write_used = true;
      else if (copy_to_exec)
         exec_write_used = false;
      else
         /* blocks_incoming_exec_used is initialized to true, so this is correct even for loops. */
         exec_write_used =
            std::any_of(block.linear_succs.begin(), block.linear_succs.end(),
                        [&ctx](int succ_idx) { return ctx.blocks_incoming_exec_used[succ_idx]; });
   }

   /* Collect information about the branching sequence. */

   bool branch_exec_val_found = false;
   int branch_exec_val_idx = -1;
   int branch_exec_copy_idx = -1;
   unsigned branch_exec_tempid = 0;

   /* Go through all instructions and eliminate useless exec writes. */

   for (int i = block.instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block.instructions[i];

      /* We already take information from phis into account before the loop, so let's just break on
       * phis. */
      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      /* See if the current instruction needs or writes exec. */
      bool needs_exec =
         needs_exec_mask(instr.get()) ||
         (instr->opcode == aco_opcode::p_logical_end && !ctx.logical_phi_info[block.index].empty());
      bool writes_exec = instr_writes_exec(instr.get());

      /* See if we found an unused exec write. */
      if (writes_exec && !exec_write_used) {
         /* Don't eliminate an instruction that writes registers other than exec and scc.
          * It is possible that this is e.g. an s_and_saveexec and the saved value is
          * used by a later branch.
          */
         bool writes_other = std::any_of(instr->definitions.begin(), instr->definitions.end(),
                                         [](const Definition& def) -> bool
                                         { return def.physReg() != exec && def.physReg() != scc; });
         if (!writes_other) {
            instr.reset();
            continue;
         }
      }

      /* For a newly encountered exec write, clear the used flag. */
      if (writes_exec) {
         if (instr->operands.size() && !branch_exec_val_found) {
            /* We are in a branch that jumps according to exec.
             * We just found the instruction that copies to exec before the branch.
             */
            assert(branch_exec_copy_idx == -1);
            branch_exec_copy_idx = i;
            branch_exec_tempid = instr->operands[0].tempId();
            branch_exec_val_found = true;
         } else if (branch_exec_val_idx == -1) {
            /* The current instruction overwrites exec before branch_exec_val_idx was
             * found, therefore we can't optimize the branching sequence.
             */
            branch_exec_copy_idx = -1;
            branch_exec_tempid = 0;
         }

         exec_write_used = false;
      } else if (branch_exec_tempid && instr->definitions.size() &&
                 instr->definitions[0].tempId() == branch_exec_tempid) {
         /* We just found the instruction that produces the exec mask that is copied. */
         assert(branch_exec_val_idx == -1);
         branch_exec_val_idx = i;
      } else if (branch_exec_tempid && branch_exec_val_idx == -1 && needs_exec) {
         /* There is an instruction that needs the original exec mask before
          * branch_exec_val_idx was found, so we can't optimize the branching sequence. */
         branch_exec_copy_idx = -1;
         branch_exec_tempid = 0;
      }

      /* If the current instruction needs exec, mark it as used. */
      exec_write_used |= needs_exec;
   }

   /* Remember if the current block needs an incoming exec mask from its predecessors. */
   ctx.blocks_incoming_exec_used[block.index] = exec_write_used;

   /* See if we can optimize the instruction that produces the exec mask. */
   if (branch_exec_val_idx != -1) {
      assert(branch_exec_tempid && branch_exec_copy_idx != -1);
      try_optimize_branching_sequence(ctx, block, branch_exec_val_idx, branch_exec_copy_idx);
   }

   /* Cleanup: remove deleted instructions from the vector. */
   auto new_end = std::remove(block.instructions.begin(), block.instructions.end(), nullptr);
   block.instructions.resize(new_end - block.instructions.begin());
}

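/* Blocks are processed in reverse so that removing a block's successor can make
 * the block itself removable (see try_remove_invert_block). */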
void
jump_threading(ssa_elimination_ctx& ctx)
{
   for (int i = ctx.program->blocks.size() - 1; i >= 0; i--) {
      Block* block = &ctx.program->blocks[i];
      eliminate_useless_exec_writes_in_block(ctx, *block);

      if (!ctx.empty_blocks[i])
         continue;

      if (block->kind & block_kind_invert) {
         try_remove_invert_block(ctx, block);
         continue;
      }

      if (block->linear_succs.size() > 1)
         continue;

      if (block->kind & block_kind_merge || block->kind & block_kind_loop_exit)
         try_remove_merge_block(ctx, block);

      if (block->linear_preds.size() == 1)
         try_remove_simple_block(ctx, block);
   }
}

} /* end namespace */

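/* Entry point: collects phi information, threads jumps through empty blocks and
 * finally replaces the phis with parallelcopies in the predecessor blocks. */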
void
ssa_elimination(Program* program)
{
   ssa_elimination_ctx ctx(program);

   /* Collect information about every phi-instruction */
   collect_phi_info(ctx);

   /* eliminate empty blocks */
   jump_threading(ctx);

   /* insert parallelcopies from SSA elimination */
   insert_parallelcopies(ctx);
}
} // namespace aco