/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "util/u_math.h"

#include <set>
#include <vector>

namespace aco {

namespace {

enum WQMState : uint8_t {
   Unspecified = 0,
   Exact = 1 << 0,
   WQM = 1 << 1, /* with control flow applied */
};

enum mask_type : uint8_t {
   mask_type_global = 1 << 0,
   mask_type_exact = 1 << 1,
   mask_type_wqm = 1 << 2,
   mask_type_loop = 1 << 3, /* active lanes of a loop */
};

struct loop_info {
   Block* loop_header;
   uint16_t num_exec_masks;
   bool has_divergent_break;
   bool has_divergent_continue;
   bool has_discard; /* has a discard or demote */
   loop_info(Block* b, uint16_t num, bool breaks, bool cont, bool discard)
       : loop_header(b), num_exec_masks(num), has_divergent_break(breaks),
         has_divergent_continue(cont), has_discard(discard)
   {}
};

struct block_info {
   std::vector<std::pair<Operand, uint8_t>>
      exec; /* Vector of exec masks. Either a temporary or const -1. */
};

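/* Pass-wide state: one exec mask stack per block plus the stack of
 * currently open loops.
 */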
struct exec_ctx {
   Program* program;
   std::vector<block_info> info;
   std::vector<loop_info> loop;
   bool handle_wqm = false;
   exec_ctx(Program* program_) : program(program_), info(program->blocks.size()) {}
};

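/* Returns whether the instruction must run with the Exact mask, i.e.
 * without helper invocations: memory instructions with disable_wqm set,
 * exports and epilog jumps.
 */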
bool
needs_exact(aco_ptr<Instruction>& instr)
{
   if (instr->isMUBUF()) {
      return instr->mubuf().disable_wqm;
   } else if (instr->isMTBUF()) {
      return instr->mtbuf().disable_wqm;
   } else if (instr->isMIMG()) {
      return instr->mimg().disable_wqm;
   } else if (instr->isFlatLike()) {
      return instr->flatlike().disable_wqm;
   } else {
      /* Require Exact for p_jump_to_epilog because if p_exit_early_if is
       * emitted inside the same block, the main FS will always jump to the PS
       * epilog without considering the exec mask.
       */
      return instr->isEXP() || instr->opcode == aco_opcode::p_jump_to_epilog ||
             instr->opcode == aco_opcode::p_dual_src_export_gfx11;
   }
}

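/* Classifies which exec state an instruction requires: Exact, WQM for
 * anything predicated by exec, or Unspecified if it doesn't matter.
 */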
WQMState
get_instr_needs(aco_ptr<Instruction>& instr)
{
   if (needs_exact(instr))
      return Exact;

   bool pred_by_exec = needs_exec_mask(instr.get()) || instr->opcode == aco_opcode::p_logical_end ||
                       instr->isBranch();

   return pred_by_exec ? WQM : Unspecified;
}

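/* An undefined operand on the stack means the mask is currently live in
 * the exec register itself, so substitute exec for it.
 */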
Operand
get_exec_op(Operand t)
{
   if (t.isUndefined())
      return Operand(exec, t.regClass());
   else
      return t;
}

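/* Makes the current exec mask WQM: if the top of the stack is the global
 * mask, a new WQM mask is computed with s_wqm and pushed; otherwise the
 * WQM mask one entry below is restored into exec by popping.
 */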
void
transition_to_WQM(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_wqm)
      return;
   if (ctx.info[idx].exec.back().second & mask_type_global) {
      Operand exec_mask = ctx.info[idx].exec.back().first;
      if (exec_mask.isUndefined())
         ctx.info[idx].exec.back().first = bld.copy(bld.def(bld.lm), Operand(exec, bld.lm));

      exec_mask = bld.sop1(Builder::s_wqm, Definition(exec, bld.lm), bld.def(s1, scc),
                           get_exec_op(exec_mask));
      ctx.info[idx].exec.emplace_back(exec_mask, mask_type_global | mask_type_wqm);
      return;
   }
   /* otherwise, the WQM mask should be one below the current mask */
   ctx.info[idx].exec.pop_back();
   assert(ctx.info[idx].exec.back().second & mask_type_wqm);
   assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
   assert(ctx.info[idx].exec.back().first.isTemp());
   ctx.info[idx].exec.back().first =
      bld.copy(Definition(exec, bld.lm), ctx.info[idx].exec.back().first);
}

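/* Makes the current exec mask Exact. If the top of the stack is a global
 * (non-loop) mask, it is popped to restore the Exact mask below it;
 * otherwise the outermost Exact mask is ANDed with the current mask and
 * the result pushed as a new entry.
 */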
void
transition_to_Exact(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_exact)
      return;
   /* We can't remove the loop exec mask, because that can cause exec.size() to
    * be less than num_exec_masks. The loop exec mask also needs to be kept
    * around for various uses. */
   if ((ctx.info[idx].exec.back().second & mask_type_global) &&
       !(ctx.info[idx].exec.back().second & mask_type_loop)) {
      ctx.info[idx].exec.pop_back();
      assert(ctx.info[idx].exec.back().second & mask_type_exact);
      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      assert(ctx.info[idx].exec.back().first.isTemp());
      ctx.info[idx].exec.back().first =
         bld.copy(Definition(exec, bld.lm), ctx.info[idx].exec.back().first);
      return;
   }
   /* otherwise, we create an exact mask and push to the stack */
   Operand wqm = ctx.info[idx].exec.back().first;
   if (wqm.isUndefined()) {
      wqm = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                     Definition(exec, bld.lm), ctx.info[idx].exec[0].first, Operand(exec, bld.lm));
   } else {
      bld.sop2(Builder::s_and, Definition(exec, bld.lm), bld.def(s1, scc),
               ctx.info[idx].exec[0].first, wqm);
   }
   ctx.info[idx].exec.back().first = Operand(wqm);
   ctx.info[idx].exec.emplace_back(Operand(bld.lm), mask_type_exact);
}

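/* Emits the instructions at the top of a block that stitch the exec mask
 * stack together across control flow: initialization in the start block,
 * linear phis at loop headers, loop exits and merge blocks, and a copy to
 * exec where divergent control flow ends. Returns the index of the first
 * unprocessed instruction of the original block.
 */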
unsigned
add_coupling_code(exec_ctx& ctx, Block* block, std::vector<aco_ptr<Instruction>>& instructions)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, &instructions);
   std::vector<unsigned>& preds = block->linear_preds;
   bool restore_exec = false;

   /* start block */
   if (preds.empty()) {
      aco_ptr<Instruction>& startpgm = block->instructions[0];
      assert(startpgm->opcode == aco_opcode::p_startpgm);
      bld.insert(std::move(startpgm));

      unsigned count = 1;
      if (block->instructions[1]->opcode == aco_opcode::p_init_scratch) {
         bld.insert(std::move(block->instructions[1]));
         count++;
      }

      Operand start_exec(bld.lm);

      /* exec seems to need to be manually initialized with combined shaders */
      if (ctx.program->stage.num_sw_stages() > 1 ||
          ctx.program->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER ||
          (ctx.program->stage.sw == SWStage::VS &&
           (ctx.program->stage.hw == AC_HW_HULL_SHADER ||
            ctx.program->stage.hw == AC_HW_LEGACY_GEOMETRY_SHADER)) ||
          (ctx.program->stage.sw == SWStage::TES &&
           ctx.program->stage.hw == AC_HW_LEGACY_GEOMETRY_SHADER)) {
         start_exec = Operand::c32_or_c64(-1u, bld.lm == s2);
         bld.copy(Definition(exec, bld.lm), start_exec);
      }

      /* EXEC is automatically initialized by the HW for compute shaders.
       * We know for sure exec is initially -1 when the shader always has full subgroups.
       */
      if (ctx.program->stage == compute_cs && ctx.program->info.cs.uses_full_subgroups)
         start_exec = Operand::c32_or_c64(-1u, bld.lm == s2);

      if (ctx.handle_wqm) {
         ctx.info[idx].exec.emplace_back(start_exec, mask_type_global | mask_type_exact);
         /* Initialize WQM already */
         transition_to_WQM(ctx, bld, idx);
      } else {
         uint8_t mask = mask_type_global;
         if (ctx.program->needs_wqm) {
            bld.sop1(Builder::s_wqm, Definition(exec, bld.lm), bld.def(s1, scc),
                     Operand(exec, bld.lm));
            mask |= mask_type_wqm;
         } else {
            mask |= mask_type_exact;
         }
         ctx.info[idx].exec.emplace_back(start_exec, mask);
      }

      return count;
   }

   /* loop entry block */
   if (block->kind & block_kind_loop_header) {
      assert(preds[0] == idx - 1);
      ctx.info[idx].exec = ctx.info[idx - 1].exec;
      loop_info& info = ctx.loop.back();
      assert(ctx.info[idx].exec.size() == info.num_exec_masks);

      /* create ssa names for outer exec masks */
      if (info.has_discard && preds.size() > 1) {
         aco_ptr<Pseudo_instruction> phi;
         for (int i = 0; i < info.num_exec_masks - 1; i++) {
            phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi,
                                                             Format::PSEUDO, preds.size(), 1));
            phi->definitions[0] = bld.def(bld.lm);
            phi->operands[0] = get_exec_op(ctx.info[preds[0]].exec[i].first);
            ctx.info[idx].exec[i].first = bld.insert(std::move(phi));
         }
      }

      ctx.info[idx].exec.back().second |= mask_type_loop;

      if (info.has_divergent_continue) {
         /* create ssa name for loop active mask */
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(
            aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
         phi->definitions[0] = bld.def(bld.lm);
         phi->operands[0] = get_exec_op(ctx.info[preds[0]].exec.back().first);
         ctx.info[idx].exec.back().first = bld.insert(std::move(phi));

         restore_exec = true;
         uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
         ctx.info[idx].exec.emplace_back(ctx.info[idx].exec.back().first, mask_type);
      }

   } else if (block->kind & block_kind_loop_exit) {
      Block* header = ctx.loop.back().loop_header;
      loop_info& info = ctx.loop.back();

      for (ASSERTED unsigned pred : preds)
         assert(ctx.info[pred].exec.size() >= info.num_exec_masks);

      /* fill the loop header phis */
      std::vector<unsigned>& header_preds = header->linear_preds;
      int instr_idx = 0;
      if (info.has_discard && header_preds.size() > 1) {
         while (instr_idx < info.num_exec_masks - 1) {
            aco_ptr<Instruction>& phi = header->instructions[instr_idx];
            assert(phi->opcode == aco_opcode::p_linear_phi);
            for (unsigned i = 1; i < phi->operands.size(); i++)
               phi->operands[i] = get_exec_op(ctx.info[header_preds[i]].exec[instr_idx].first);
            instr_idx++;
         }
      }

      if (info.has_divergent_continue) {
         aco_ptr<Instruction>& phi = header->instructions[instr_idx++];
         assert(phi->opcode == aco_opcode::p_linear_phi);
         for (unsigned i = 1; i < phi->operands.size(); i++)
            phi->operands[i] =
               get_exec_op(ctx.info[header_preds[i]].exec[info.num_exec_masks - 1].first);
      }

      if (info.has_divergent_break) {
         restore_exec = true;
         /* Drop the loop active mask. */
         info.num_exec_masks--;
      }
      assert(!(block->kind & block_kind_top_level) || info.num_exec_masks <= 2);

      /* create the loop exit phis if not trivial */
      for (unsigned exec_idx = 0; exec_idx < info.num_exec_masks; exec_idx++) {
         Operand same = ctx.info[preds[0]].exec[exec_idx].first;
         uint8_t type = ctx.info[header_preds[0]].exec[exec_idx].second;
         bool trivial = true;

         for (unsigned i = 1; i < preds.size() && trivial; i++) {
            if (ctx.info[preds[i]].exec[exec_idx].first != same)
               trivial = false;
         }

         if (trivial) {
            ctx.info[idx].exec.emplace_back(same, type);
         } else {
            /* create phi for loop footer */
            aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(
               aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
            phi->definitions[0] = bld.def(bld.lm);
            for (unsigned i = 0; i < phi->operands.size(); i++)
               phi->operands[i] = get_exec_op(ctx.info[preds[i]].exec[exec_idx].first);
            ctx.info[idx].exec.emplace_back(bld.insert(std::move(phi)), type);
         }
      }

      assert(ctx.info[idx].exec.size() == info.num_exec_masks);
      ctx.loop.pop_back();

   } else if (preds.size() == 1) {
      ctx.info[idx].exec = ctx.info[preds[0]].exec;
   } else {
      assert(preds.size() == 2);
      /* if one of the predecessors ends in exact mask, we pop it from stack */
      unsigned num_exec_masks =
         std::min(ctx.info[preds[0]].exec.size(), ctx.info[preds[1]].exec.size());

      if (block->kind & block_kind_merge) {
         restore_exec = true;
         num_exec_masks--;
      }
      if (block->kind & block_kind_top_level)
         num_exec_masks = std::min(num_exec_masks, 2u);

      /* create phis for diverged exec masks */
      for (unsigned i = 0; i < num_exec_masks; i++) {
         /* skip trivial phis */
         if (ctx.info[preds[0]].exec[i].first == ctx.info[preds[1]].exec[i].first) {
            Operand t = ctx.info[preds[0]].exec[i].first;
            /* discard/demote can change the state of the current exec mask */
            assert(!t.isTemp() ||
                   ctx.info[preds[0]].exec[i].second == ctx.info[preds[1]].exec[i].second);
            uint8_t mask = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
            ctx.info[idx].exec.emplace_back(t, mask);
            continue;
         }

         Temp phi = bld.pseudo(aco_opcode::p_linear_phi, bld.def(bld.lm),
                               get_exec_op(ctx.info[preds[0]].exec[i].first),
                               get_exec_op(ctx.info[preds[1]].exec[i].first));
         uint8_t mask_type = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
         ctx.info[idx].exec.emplace_back(phi, mask_type);
      }
   }

   unsigned i = 0;
   while (block->instructions[i]->opcode == aco_opcode::p_phi ||
          block->instructions[i]->opcode == aco_opcode::p_linear_phi) {
      bld.insert(std::move(block->instructions[i]));
      i++;
   }

   if (ctx.handle_wqm) {
      /* End WQM handling if not needed anymore */
      if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
         if (block->instructions[i]->opcode == aco_opcode::p_end_wqm) {
            ctx.info[idx].exec.back().second |= mask_type_global;
            transition_to_Exact(ctx, bld, idx);
            ctx.handle_wqm = false;
            restore_exec = false;
            i++;
         }
      }
   }

   /* restore exec mask after divergent control flow */
   if (restore_exec) {
      Operand restore = get_exec_op(ctx.info[idx].exec.back().first);
      assert(restore.size() == bld.lm.size());
      bld.copy(Definition(exec, bld.lm), restore);
      if (!restore.isConstant())
         ctx.info[idx].exec.back().first = Operand(bld.lm);
   }

   return i;
}

/* Avoid live-range splits in Exact mode:
 * Because the data register of atomic VMEM instructions
 * is shared between src and dst, it might be necessary
 * to create live-range splits during RA.
 * Make the live-range splits explicit in WQM mode.
 */
void
handle_atomic_data(exec_ctx& ctx, Builder& bld, unsigned block_idx, aco_ptr<Instruction>& instr)
{
   /* check if this is an atomic VMEM instruction */
   int idx = -1;
   if (!instr->isVMEM() || instr->definitions.empty())
      return;
   else if (instr->isMIMG())
      idx = instr->operands[2].isTemp() ? 2 : -1;
   else if (instr->operands.size() == 4)
      idx = 3;

   if (idx != -1) {
      /* insert explicit copy of atomic data in WQM-mode */
      transition_to_WQM(ctx, bld, block_idx);
      Temp data = instr->operands[idx].getTemp();
      data = bld.copy(bld.def(data.regClass()), data);
      instr->operands[idx].setTemp(data);
   }
}

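/* Processes the remaining instructions of a block: inserts WQM/Exact
 * transitions where needed and lowers the pseudo-instructions that depend
 * on the exec mask stack (p_discard_if, p_is_helper, p_demote_to_helper,
 * p_elect and p_end_wqm).
 */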
void
process_instructions(exec_ctx& ctx, Block* block, std::vector<aco_ptr<Instruction>>& instructions,
                     unsigned idx)
{
   block_info& info = ctx.info[block->index];
   WQMState state;
   if (info.exec.back().second & mask_type_wqm) {
      state = WQM;
   } else {
      assert(!ctx.handle_wqm || info.exec.back().second & mask_type_exact);
      state = Exact;
   }

   Builder bld(ctx.program, &instructions);

   for (; idx < block->instructions.size(); idx++) {
      aco_ptr<Instruction> instr = std::move(block->instructions[idx]);

      WQMState needs = ctx.handle_wqm ? get_instr_needs(instr) : Unspecified;

      if (needs == WQM && state != WQM) {
         transition_to_WQM(ctx, bld, block->index);
         state = WQM;
      } else if (needs == Exact) {
         if (ctx.handle_wqm)
            handle_atomic_data(ctx, bld, block->index, instr);
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
      }

      if (instr->opcode == aco_opcode::p_discard_if) {
         Operand current_exec = Operand(exec, bld.lm);

         if (block->instructions[idx + 1]->opcode == aco_opcode::p_end_wqm) {
            /* Transition to Exact without extra instruction. */
            info.exec.resize(1);
            assert(info.exec[0].second == (mask_type_exact | mask_type_global));
            current_exec = get_exec_op(info.exec[0].first);
            info.exec[0].first = Operand(bld.lm);
            state = Exact;
         } else if (info.exec.size() >= 2 && ctx.handle_wqm) {
            /* Preserve the WQM mask */
            info.exec[1].second &= ~mask_type_global;
         }

         Temp cond, exit_cond;
         if (instr->operands[0].isConstant()) {
            assert(instr->operands[0].constantValue() == -1u);
            /* save condition and set exec to zero */
            exit_cond = bld.tmp(s1);
            cond =
               bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.scc(Definition(exit_cond)),
                        Definition(exec, bld.lm), Operand::zero(), Operand(exec, bld.lm));
         } else {
            cond = instr->operands[0].getTemp();
            /* discard from current exec */
            exit_cond = bld.sop2(Builder::s_andn2, Definition(exec, bld.lm), bld.def(s1, scc),
                                 current_exec, cond)
                           .def(1)
                           .getTemp();
         }

         /* discard from inner to outer exec mask on stack */
         int num = info.exec.size() - 2;
         for (int i = num; i >= 0; i--) {
            Instruction* andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                          info.exec[i].first, cond);
            info.exec[i].first = Operand(andn2->definitions[0].getTemp());
            exit_cond = andn2->definitions[1].getTemp();
         }

         instr->opcode = aco_opcode::p_exit_early_if;
         instr->operands[0] = bld.scc(exit_cond);
         assert(!ctx.handle_wqm || (info.exec[0].second & mask_type_wqm) == 0);

      } else if (instr->opcode == aco_opcode::p_is_helper) {
         Definition dst = instr->definitions[0];
         assert(dst.size() == bld.lm.size());
         if (state == Exact) {
            instr.reset(create_instruction<SOP1_instruction>(bld.w64or32(Builder::s_mov),
                                                             Format::SOP1, 1, 1));
            instr->operands[0] = Operand::zero();
            instr->definitions[0] = dst;
         } else {
            std::pair<Operand, uint8_t>& exact_mask = info.exec[0];
            assert(exact_mask.second & mask_type_exact);

            instr.reset(create_instruction<SOP2_instruction>(bld.w64or32(Builder::s_andn2),
                                                             Format::SOP2, 2, 2));
            instr->operands[0] = Operand(exec, bld.lm); /* current exec */
            instr->operands[1] = Operand(exact_mask.first);
            instr->definitions[0] = dst;
            instr->definitions[1] = bld.def(s1, scc);
         }
      } else if (instr->opcode == aco_opcode::p_demote_to_helper) {
         assert((info.exec[0].second & mask_type_exact) &&
                (info.exec[0].second & mask_type_global));

         const bool nested_cf = !(info.exec.back().second & mask_type_global);
         if (ctx.handle_wqm && state == Exact && nested_cf) {
            /* Transition back to WQM without extra instruction. */
            info.exec.pop_back();
            state = WQM;
         } else if (block->instructions[idx + 1]->opcode == aco_opcode::p_end_wqm) {
            /* Transition to Exact without extra instruction. */
            info.exec.resize(1);
            state = Exact;
         } else if (nested_cf) {
            /* Save current exec temporarily. */
            info.exec.back().first = bld.copy(bld.def(bld.lm), Operand(exec, bld.lm));
         }

         /* Remove invocations from global exact mask. */
         Definition def = state == Exact ? Definition(exec, bld.lm) : bld.def(bld.lm);
         Operand src = instr->operands[0].isConstant() ? Operand(exec, bld.lm) : instr->operands[0];

         Definition exit_cond =
            bld.sop2(Builder::s_andn2, def, bld.def(s1, scc), get_exec_op(info.exec[0].first), src)
               .def(1);
         info.exec[0].first = Operand(def.getTemp());

         /* Update global WQM mask and store in exec. */
         if (state == WQM) {
            assert(info.exec.size() > 1);
            exit_cond =
               bld.sop1(Builder::s_wqm, Definition(exec, bld.lm), bld.def(s1, scc), def.getTemp())
                  .def(1);
         }

         /* End shader if global mask is zero. */
         instr->opcode = aco_opcode::p_exit_early_if;
         instr->operands[0] = bld.scc(exit_cond.getTemp());
         bld.insert(std::move(instr));

         /* Update all other exec masks. */
         if (nested_cf) {
            const unsigned global_idx = state == WQM ? 1 : 0;
            for (unsigned i = global_idx + 1; i < info.exec.size() - 1; i++) {
               info.exec[i].first =
                  bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc),
                           get_exec_op(info.exec[i].first), Operand(exec, bld.lm));
            }
            /* Update current exec and save WQM mask. */
            info.exec[global_idx].first =
               bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                        Definition(exec, bld.lm), info.exec.back().first, Operand(exec, bld.lm));
            info.exec.back().first = Operand(bld.lm);
         }
         continue;

      } else if (instr->opcode == aco_opcode::p_elect) {
         bool all_lanes_enabled = info.exec.back().first.constantEquals(-1u);
         Definition dst = instr->definitions[0];

         if (all_lanes_enabled) {
            bld.copy(Definition(dst), Operand::c32_or_c64(1u, dst.size() == 2));
         } else {
            Temp first_lane_idx = bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm));
            bld.sop2(Builder::s_lshl, Definition(dst), bld.def(s1, scc),
                     Operand::c32_or_c64(1u, dst.size() == 2), Operand(first_lane_idx));
         }
         continue;
      } else if (instr->opcode == aco_opcode::p_end_wqm) {
         assert(block->kind & block_kind_top_level);
         assert(info.exec.size() <= 2);
         /* This instruction indicates the end of WQM mode. */
         info.exec.back().second |= mask_type_global;
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
         ctx.handle_wqm = false;
         continue;
      }

      bld.insert(std::move(instr));
   }
}

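/* Rewrites the branch at the end of a block according to its kind:
 * gathers loop information in preheaders, and emits the exec mask
 * save/restore and s_and_saveexec/s_andn2 sequences that implement
 * divergent branches, breaks and continues.
 */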
void
add_branch_code(exec_ctx& ctx, Block* block)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, block);

   if (block->linear_succs.empty())
      return;

   if (block->kind & block_kind_loop_preheader) {
      /* collect information about the succeeding loop */
      bool has_divergent_break = false;
      bool has_divergent_continue = false;
      bool has_discard = false;
      unsigned loop_nest_depth = ctx.program->blocks[idx + 1].loop_nest_depth;

      for (unsigned i = idx + 1; ctx.program->blocks[i].loop_nest_depth >= loop_nest_depth; i++) {
         Block& loop_block = ctx.program->blocks[i];

         if (loop_block.kind & block_kind_uses_discard)
            has_discard = true;
         if (loop_block.loop_nest_depth != loop_nest_depth)
            continue;

         if (loop_block.kind & block_kind_uniform)
            continue;
         else if (loop_block.kind & block_kind_break)
            has_divergent_break = true;
         else if (loop_block.kind & block_kind_continue)
            has_divergent_continue = true;
      }

      if (has_divergent_break) {
         /* save the exec mask so it can be restored at the loop exit */
         uint8_t mask = ctx.info[idx].exec.back().second;
         if (ctx.info[idx].exec.back().first.constantEquals(-1u)) {
            ctx.info[idx].exec.emplace_back(Operand(exec, bld.lm), mask);
         } else {
            bld.reset(bld.instructions, std::prev(bld.instructions->end()));
            Operand restore = bld.copy(bld.def(bld.lm), Operand(exec, bld.lm));
            ctx.info[idx].exec.emplace(std::prev(ctx.info[idx].exec.end()), restore, mask);
            bld.reset(bld.instructions);
         }
         ctx.info[idx].exec.back().second &= (mask_type_wqm | mask_type_exact);
      }
      unsigned num_exec_masks = ctx.info[idx].exec.size();

      ctx.loop.emplace_back(&ctx.program->blocks[block->linear_succs[0]], num_exec_masks,
                            has_divergent_break, has_divergent_continue, has_discard);
   }

   /* For normal breaks, this is the exec mask. For discard+break, it's the
    * old exec mask before it was zeroed.
    */
   Operand break_cond = Operand(exec, bld.lm);

   if (block->kind & block_kind_continue_or_break) {
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[1]].linear_succs[0]].kind &
             block_kind_loop_header);
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[0]].linear_succs[0]].kind &
             block_kind_loop_exit);
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      bool need_parallelcopy = false;
      while (!(ctx.info[idx].exec.back().second & mask_type_loop)) {
         ctx.info[idx].exec.pop_back();
         need_parallelcopy = true;
      }

      if (need_parallelcopy)
         ctx.info[idx].exec.back().first =
            bld.copy(Definition(exec, bld.lm), ctx.info[idx].exec.back().first);
      bld.branch(aco_opcode::p_cbranch_nz, bld.def(s2), Operand(exec, bld.lm),
                 block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_uniform) {
      Pseudo_branch_instruction& branch = block->instructions.back()->branch();
      if (branch.opcode == aco_opcode::p_branch) {
         branch.target[0] = block->linear_succs[0];
      } else {
         branch.target[0] = block->linear_succs[1];
         branch.target[1] = block->linear_succs[0];
      }
      return;
   }

   if (block->kind & block_kind_branch) {
      // orig = s_and_saveexec_b64
      assert(block->linear_succs.size() == 2);
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_z);
      Temp cond = block->instructions.back()->operands[0].getTemp();
      const bool sel_ctrl = block->instructions.back()->branch().selection_control_remove;
      block->instructions.pop_back();

      uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
      if (ctx.info[idx].exec.back().first.constantEquals(-1u)) {
         bld.copy(Definition(exec, bld.lm), cond);
      } else {
         Temp old_exec = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                                  Definition(exec, bld.lm), cond, Operand(exec, bld.lm));

         ctx.info[idx].exec.back().first = Operand(old_exec);
      }

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(Operand(bld.lm), mask_type);

      Builder::Result r = bld.branch(aco_opcode::p_cbranch_z, bld.def(s2), Operand(exec, bld.lm),
                                     block->linear_succs[1], block->linear_succs[0]);
      r->branch().selection_control_remove = sel_ctrl;
      return;
   }

   if (block->kind & block_kind_invert) {
      // exec = s_andn2_b64 (original_exec, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      const bool sel_ctrl = block->instructions.back()->branch().selection_control_remove;
      block->instructions.pop_back();
      assert(ctx.info[idx].exec.size() >= 2);
      Operand orig_exec = ctx.info[idx].exec[ctx.info[idx].exec.size() - 2].first;
      bld.sop2(Builder::s_andn2, Definition(exec, bld.lm), bld.def(s1, scc), orig_exec,
               Operand(exec, bld.lm));

      Builder::Result r = bld.branch(aco_opcode::p_cbranch_z, bld.def(s2), Operand(exec, bld.lm),
                                     block->linear_succs[1], block->linear_succs[0]);
      r->branch().selection_control_remove = sel_ctrl;
      return;
   }

   if (block->kind & block_kind_break) {
      // loop_mask = s_andn2_b64 (loop_mask, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         cond = bld.tmp(s1);
         Operand exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, break_cond);
         ctx.info[idx].exec[exec_idx].first = exec_mask;
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
      }

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         bld.copy(Definition(exec, bld.lm), Operand::zero(bld.lm.bytes()));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.def(s2), bld.scc(cond), block->linear_succs[1],
                 block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_continue) {
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
         cond = bld.tmp(s1);
         Operand exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, Operand(exec, bld.lm));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
      }
      assert(cond != Temp());

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         bld.copy(Definition(exec, bld.lm), Operand::zero(bld.lm.bytes()));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.def(s2), bld.scc(cond), block->linear_succs[1],
                 block->linear_succs[0]);
      return;
   }
}

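/* Rebuilds a block's instruction list with coupling code, exec mask
 * transitions and branch lowering applied.
 */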
void
process_block(exec_ctx& ctx, Block* block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block->instructions.size());

   unsigned idx = add_coupling_code(ctx, block, instructions);

   assert(!block->linear_succs.empty() || ctx.info[block->index].exec.size() <= 2);

   process_instructions(ctx, block, instructions, idx);

   block->instructions = std::move(instructions);

   add_branch_code(ctx, block);
}

} /* end namespace */

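/* Entry point of the pass: walks all blocks in order and maintains the
 * exec mask stack. WQM handling is only enabled for programs that need
 * both WQM and Exact execution.
 */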
void
insert_exec_mask(Program* program)
{
   exec_ctx ctx(program);

   if (program->needs_wqm && program->needs_exact)
      ctx.handle_wqm = true;

   for (Block& block : program->blocks)
      process_block(ctx, &block);
}

} // namespace aco