/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"

#include <algorithm>
#include <map>
#include <vector>

namespace aco {
namespace {

struct phi_info_item {
   Definition def;
   Operand op;
};

struct ssa_elimination_ctx {
   /* The outer vectors should be indexed by block index. The inner vectors store phi information
    * for each block. */
   std::vector<std::vector<phi_info_item>> logical_phi_info;
   std::vector<std::vector<phi_info_item>> linear_phi_info;
   std::vector<bool> empty_blocks;
   std::vector<bool> blocks_incoming_exec_used;
   Program* program;

   ssa_elimination_ctx(Program* program_)
       : logical_phi_info(program_->blocks.size()), linear_phi_info(program_->blocks.size()),
         empty_blocks(program_->blocks.size(), true),
         blocks_incoming_exec_used(program_->blocks.size(), true), program(program_)
   {}
};

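/* For every phi, record a (definition, operand) pair in the corresponding
 * predecessor block whenever the operand's register differs from the phi's
 * definition, and mark that predecessor as non-empty. */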
void
collect_phi_info(ssa_elimination_ctx& ctx)
{
   for (Block& block : ctx.program->blocks) {
      for (aco_ptr<Instruction>& phi : block.instructions) {
         if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
            break;

         for (unsigned i = 0; i < phi->operands.size(); i++) {
            if (phi->operands[i].isUndefined())
               continue;
            if (phi->operands[i].physReg() == phi->definitions[0].physReg())
               continue;

            assert(phi->definitions[0].size() == phi->operands[i].size());

            std::vector<unsigned>& preds =
               phi->opcode == aco_opcode::p_phi ? block.logical_preds : block.linear_preds;
            uint32_t pred_idx = preds[i];
            auto& info_vec = phi->opcode == aco_opcode::p_phi ? ctx.logical_phi_info[pred_idx]
                                                              : ctx.linear_phi_info[pred_idx];
            info_vec.push_back({phi->definitions[0], phi->operands[i]});
            ctx.empty_blocks[pred_idx] = false;
         }
      }
   }
}

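/* Turn the collected phi information into p_parallelcopy instructions: copies
 * for logical phis go right before p_logical_end, copies for linear phis go at
 * the end of the predecessor block, just before the branch. */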
void
insert_parallelcopies(ssa_elimination_ctx& ctx)
{
   /* insert the parallelcopies from logical phis before p_logical_end */
   for (unsigned block_idx = 0; block_idx < ctx.program->blocks.size(); ++block_idx) {
      auto& logical_phi_info = ctx.logical_phi_info[block_idx];
      if (logical_phi_info.empty())
         continue;

      Block& block = ctx.program->blocks[block_idx];
      unsigned idx = block.instructions.size() - 1;
      while (block.instructions[idx]->opcode != aco_opcode::p_logical_end) {
         assert(idx > 0);
         idx--;
      }

      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block.instructions.begin(), idx);
      aco_ptr<Pseudo_instruction> pc{
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO,
                                                logical_phi_info.size(), logical_phi_info.size())};
      unsigned i = 0;
      for (auto& phi_info : logical_phi_info) {
         pc->definitions[i] = phi_info.def;
         pc->operands[i] = phi_info.op;
         i++;
      }
      /* this shouldn't be needed since we're only copying vgprs */
      pc->tmp_in_scc = false;
      block.instructions.insert(it, std::move(pc));
   }

   /* insert parallelcopies for the linear phis at the end of blocks just before the branch */
   for (unsigned block_idx = 0; block_idx < ctx.program->blocks.size(); ++block_idx) {
      auto& linear_phi_info = ctx.linear_phi_info[block_idx];
      if (linear_phi_info.empty())
         continue;

      Block& block = ctx.program->blocks[block_idx];
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.end();
      --it;
      assert((*it)->isBranch());
      aco_ptr<Pseudo_instruction> pc{
         create_instruction<Pseudo_instruction>(aco_opcode::p_parallelcopy, Format::PSEUDO,
                                                linear_phi_info.size(), linear_phi_info.size())};
      unsigned i = 0;
      for (auto& phi_info : linear_phi_info) {
         pc->definitions[i] = phi_info.def;
         pc->operands[i] = phi_info.op;
         i++;
      }
      pc->tmp_in_scc = block.scc_live_out;
      pc->scratch_sgpr = block.scratch_sgpr;
      block.instructions.insert(it, std::move(pc));
   }
}

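/* A block counts as empty if it only contains phis, logical markers, branches
 * and no-op parallelcopies. With ignore_exec_writes, writes to exec (copies or
 * s_andn2) are tolerated as well. */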
bool
is_empty_block(Block* block, bool ignore_exec_writes)
{
   /* check if this block is empty and the exec mask is not needed */
   for (aco_ptr<Instruction>& instr : block->instructions) {
      switch (instr->opcode) {
      case aco_opcode::p_linear_phi:
      case aco_opcode::p_phi:
      case aco_opcode::p_logical_start:
      case aco_opcode::p_logical_end:
      case aco_opcode::p_branch: break;
      case aco_opcode::p_parallelcopy:
         for (unsigned i = 0; i < instr->definitions.size(); i++) {
            if (ignore_exec_writes && instr->definitions[i].physReg() == exec)
               continue;
            if (instr->definitions[i].physReg() != instr->operands[i].physReg())
               return false;
         }
         break;
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_andn2_b32:
         if (ignore_exec_writes && instr->definitions[0].physReg() == exec)
            break;
         return false;
      default: return false;
      }
   }
   return true;
}

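/* If an empty merge block's only linear successor is another merge block
 * (which restores exec anyway), reduce it to just its branch instruction. */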
void
try_remove_merge_block(ssa_elimination_ctx& ctx, Block* block)
{
   /* check if the successor is another merge block which restores exec */
   // TODO: divergent loops also restore exec
   if (block->linear_succs.size() != 1 ||
       !(ctx.program->blocks[block->linear_succs[0]].kind & block_kind_merge))
      return;

   /* check if this block is empty */
   if (!is_empty_block(block, true))
      return;

   /* keep the branch instruction and remove the rest */
   aco_ptr<Instruction> branch = std::move(block->instructions.back());
   block->instructions.clear();
   block->instructions.emplace_back(std::move(branch));
}

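/* Remove an otherwise empty invert block once both of its linear successors
 * have been collapsed into the same block, rerouting both predecessors to
 * branch directly to that successor. */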
void
try_remove_invert_block(ssa_elimination_ctx& ctx, Block* block)
{
   assert(block->linear_succs.size() == 2);
   /* only remove this block if the successor got removed as well */
   if (block->linear_succs[0] != block->linear_succs[1])
      return;

   /* check if block is otherwise empty */
   if (!is_empty_block(block, true))
      return;

   unsigned succ_idx = block->linear_succs[0];
   assert(block->linear_preds.size() == 2);
   for (unsigned i = 0; i < 2; i++) {
      Block* pred = &ctx.program->blocks[block->linear_preds[i]];
      pred->linear_succs[0] = succ_idx;
      ctx.program->blocks[succ_idx].linear_preds[i] = pred->index;

      Pseudo_branch_instruction& branch = pred->instructions.back()->branch();
      assert(branch.isBranch());
      branch.target[0] = succ_idx;
      branch.target[1] = succ_idx;
   }

   block->instructions.clear();
   block->linear_preds.clear();
   block->linear_succs.clear();
}

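/* Remove an empty block with a single linear predecessor by retargeting the
 * predecessor's branch to the successor, preserving fall-through paths and
 * inverting the branch condition where necessary. */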
void
try_remove_simple_block(ssa_elimination_ctx& ctx, Block* block)
{
   if (!is_empty_block(block, false))
      return;

   Block& pred = ctx.program->blocks[block->linear_preds[0]];
   Block& succ = ctx.program->blocks[block->linear_succs[0]];
   Pseudo_branch_instruction& branch = pred.instructions.back()->branch();
   if (branch.opcode == aco_opcode::p_branch) {
      branch.target[0] = succ.index;
      branch.target[1] = succ.index;
   } else if (branch.target[0] == block->index) {
      branch.target[0] = succ.index;
   } else if (branch.target[0] == succ.index) {
      assert(branch.target[1] == block->index);
      branch.target[1] = succ.index;
      branch.opcode = aco_opcode::p_branch;
   } else if (branch.target[1] == block->index) {
      /* check if there is a fall-through path from block to succ */
      bool falls_through = block->index < succ.index;
      for (unsigned j = block->index + 1; falls_through && j < succ.index; j++) {
         assert(ctx.program->blocks[j].index == j);
         if (!ctx.program->blocks[j].instructions.empty())
            falls_through = false;
      }
      if (falls_through) {
         branch.target[1] = succ.index;
      } else {
         /* check if there is a fall-through path for the alternative target */
         if (block->index >= branch.target[0])
            return;
         for (unsigned j = block->index + 1; j < branch.target[0]; j++) {
            if (!ctx.program->blocks[j].instructions.empty())
               return;
         }

         /* This is a (uniform) break or continue block. The branch condition has to be inverted. */
         if (branch.opcode == aco_opcode::p_cbranch_z)
            branch.opcode = aco_opcode::p_cbranch_nz;
         else if (branch.opcode == aco_opcode::p_cbranch_nz)
            branch.opcode = aco_opcode::p_cbranch_z;
         else
            assert(false);
         /* also invert the linear successors */
         pred.linear_succs[0] = pred.linear_succs[1];
         pred.linear_succs[1] = succ.index;
         branch.target[1] = branch.target[0];
         branch.target[0] = succ.index;
      }
   } else {
      assert(false);
   }

   if (branch.target[0] == branch.target[1])
      branch.opcode = aco_opcode::p_branch;

   for (unsigned i = 0; i < pred.linear_succs.size(); i++)
      if (pred.linear_succs[i] == block->index)
         pred.linear_succs[i] = succ.index;

   for (unsigned i = 0; i < succ.linear_preds.size(); i++)
      if (succ.linear_preds[i] == block->index)
         succ.linear_preds[i] = pred.index;

   block->instructions.clear();
   block->linear_preds.clear();
   block->linear_succs.clear();
}

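/* Check whether the instruction writes either half of the exec mask. */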
bool
instr_writes_exec(Instruction* instr)
{
   for (Definition& def : instr->definitions)
      if (def.physReg() == exec || def.physReg() == exec_hi)
         return true;

   return false;
}

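/* Walk the block backwards and delete exec writes whose value is neither used
 * later in the block nor needed by any successor. The result is stored in
 * blocks_incoming_exec_used so that predecessor blocks can reuse it. */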
void
eliminate_useless_exec_writes_in_block(ssa_elimination_ctx& ctx, Block& block)
{
   /* Check if any successor needs the outgoing exec mask from the current block. */

   bool exec_write_used;

   if (!ctx.logical_phi_info[block.index].empty()) {
      exec_write_used = true;
   } else {
      bool copy_to_exec = false;
      bool copy_from_exec = false;

      for (const auto& successor_phi_info : ctx.linear_phi_info[block.index]) {
         copy_to_exec |= successor_phi_info.def.physReg() == exec;
         copy_from_exec |= successor_phi_info.op.physReg() == exec;
      }

      if (copy_from_exec)
         exec_write_used = true;
      else if (copy_to_exec)
         exec_write_used = false;
      else
         /* blocks_incoming_exec_used is initialized to true, so this is correct even for loops. */
         exec_write_used =
            std::any_of(block.linear_succs.begin(), block.linear_succs.end(),
                        [&ctx](int succ_idx) { return ctx.blocks_incoming_exec_used[succ_idx]; });
   }

   /* Go through all instructions and eliminate useless exec writes. */

   for (int i = block.instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block.instructions[i];

      /* We already take information from phis into account before the loop, so let's just break on
       * phis. */
      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      /* See if the current instruction needs or writes exec. */
      bool needs_exec = needs_exec_mask(instr.get());
      bool writes_exec = instr_writes_exec(instr.get());

      /* See if we found an unused exec write. */
      if (writes_exec && !exec_write_used) {
         instr.reset();
         continue;
      }

      /* For a newly encountered exec write, clear the used flag. */
      if (writes_exec)
         exec_write_used = false;

      /* If the current instruction needs exec, mark it as used. */
      exec_write_used |= needs_exec;
   }

   /* Remember if the current block needs an incoming exec mask from its predecessors. */
   ctx.blocks_incoming_exec_used[block.index] = exec_write_used;

   /* Cleanup: remove deleted instructions from the vector. */
   auto new_end = std::remove(block.instructions.begin(), block.instructions.end(), nullptr);
   block.instructions.resize(new_end - block.instructions.begin());
}

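/* Process the blocks in reverse order: drop useless exec writes and try to
 * remove blocks that are empty and need no phi-related copies (invert,
 * merge/loop-exit and simple blocks). */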
void
jump_threading(ssa_elimination_ctx& ctx)
{
   for (int i = ctx.program->blocks.size() - 1; i >= 0; i--) {
      Block* block = &ctx.program->blocks[i];
      eliminate_useless_exec_writes_in_block(ctx, *block);

      if (!ctx.empty_blocks[i])
         continue;

      if (block->kind & block_kind_invert) {
         try_remove_invert_block(ctx, block);
         continue;
      }

      if (block->linear_succs.size() > 1)
         continue;

      if (block->kind & block_kind_merge || block->kind & block_kind_loop_exit)
         try_remove_merge_block(ctx, block);

      if (block->linear_preds.size() == 1)
         try_remove_simple_block(ctx, block);
   }
}

} /* end namespace */

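/* Entry point: lower phis out of SSA form by collecting phi information,
 * threading jumps through empty blocks and inserting the resulting
 * parallelcopies into the predecessor blocks. */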
void
ssa_elimination(Program* program)
{
   ssa_elimination_ctx ctx(program);

   /* Collect information about every phi-instruction */
   collect_phi_info(ctx);

   /* eliminate empty blocks */
   jump_threading(ctx);

   /* insert parallelcopies from SSA elimination */
   insert_parallelcopies(ctx);
}
} // namespace aco