• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "aco_ir.h"
26 #include "aco_util.h"
27 
28 #include <unordered_map>
29 #include <vector>
30 
31 /*
32  * Implements the algorithm for dominator-tree value numbering
33  * from "Value Numbering" by Briggs, Cooper, and Simpson.
34  */
35 
36 namespace aco {
37 namespace {
38 
inline uint32_t
murmur_32_scramble(uint32_t h, uint32_t k)
{
   /* One Murmur3 per-word mixing step: scramble the input word k and fold it
    * into the running hash state h.
    */
   k *= 0xcc9e2d51u;
   k = (k << 15) | (k >> 17); /* rotate left by 15 */
   h ^= k * 0x1b873593u;
   h = (h << 13) | (h >> 19); /* rotate left by 13 */
   return h * 5u + 0xe6546b64u;
}
49 
50 template <typename T>
51 uint32_t
hash_murmur_32(Instruction * instr)52 hash_murmur_32(Instruction* instr)
53 {
54    uint32_t hash = uint32_t(instr->format) << 16 | uint32_t(instr->opcode);
55 
56    for (const Operand& op : instr->operands)
57       hash = murmur_32_scramble(hash, op.constantValue());
58 
59    /* skip format, opcode and pass_flags */
60    for (unsigned i = 2; i < (sizeof(T) >> 2); i++) {
61       uint32_t u;
62       /* Accesses it though a byte array, so doesn't violate the strict aliasing rule */
63       memcpy(&u, reinterpret_cast<uint8_t*>(instr) + i * 4, 4);
64       hash = murmur_32_scramble(hash, u);
65    }
66 
67    /* Finalize. */
68    uint32_t len = instr->operands.size() + instr->definitions.size() + sizeof(T);
69    hash ^= len;
70    hash ^= hash >> 16;
71    hash *= 0x85ebca6b;
72    hash ^= hash >> 13;
73    hash *= 0xc2b2ae35;
74    hash ^= hash >> 16;
75    return hash;
76 }
77 
struct InstrHash {
   /* This hash function uses the Murmur3 algorithm written by Austin Appleby
    * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
    *
    * In order to calculate the expression set, only the right-hand-side of an
    * instruction is used for the hash, i.e. everything except the definitions.
    */
   std::size_t operator()(Instruction* instr) const
   {
      /* Dispatch to hash_murmur_32<T> with the most specific struct type so
       * that all of the instruction's encoded fields are covered by the hash.
       * The DPP/SDWA/VINTERP variants are tested before the generic isVALU()
       * case so that their additional fields are included (larger sizeof(T)).
       */
      if (instr->isDPP16())
         return hash_murmur_32<DPP16_instruction>(instr);

      if (instr->isDPP8())
         return hash_murmur_32<DPP8_instruction>(instr);

      if (instr->isSDWA())
         return hash_murmur_32<SDWA_instruction>(instr);

      if (instr->isVINTERP_INREG())
         return hash_murmur_32<VINTERP_inreg_instruction>(instr);

      if (instr->isVALU())
         return hash_murmur_32<VALU_instruction>(instr);

      /* Remaining formats map 1:1 to their encoding struct. */
      switch (instr->format) {
      case Format::SMEM: return hash_murmur_32<SMEM_instruction>(instr);
      case Format::VINTRP: return hash_murmur_32<VINTRP_instruction>(instr);
      case Format::DS: return hash_murmur_32<DS_instruction>(instr);
      case Format::SOPP: return hash_murmur_32<SOPP_instruction>(instr);
      case Format::SOPK: return hash_murmur_32<SOPK_instruction>(instr);
      case Format::EXP: return hash_murmur_32<Export_instruction>(instr);
      case Format::MUBUF: return hash_murmur_32<MUBUF_instruction>(instr);
      case Format::MIMG: return hash_murmur_32<MIMG_instruction>(instr);
      case Format::MTBUF: return hash_murmur_32<MTBUF_instruction>(instr);
      case Format::FLAT: return hash_murmur_32<FLAT_instruction>(instr);
      case Format::PSEUDO_BRANCH: return hash_murmur_32<Pseudo_branch_instruction>(instr);
      case Format::PSEUDO_REDUCTION: return hash_murmur_32<Pseudo_reduction_instruction>(instr);
      /* Formats without extra fields hash only the base Instruction. */
      default: return hash_murmur_32<Instruction>(instr);
      }
   }
};
119 
/* Equality predicate for the expression set: two instructions represent the
 * same expression if everything except the temp ids of their definitions
 * matches. Must agree with InstrHash (equal instructions hash equally).
 */
struct InstrPred {
   bool operator()(Instruction* a, Instruction* b) const
   {
      if (a->format != b->format)
         return false;
      if (a->opcode != b->opcode)
         return false;
      if (a->operands.size() != b->operands.size() ||
          a->definitions.size() != b->definitions.size())
         return false; /* possible with pseudo-instructions */

      /* Operands must match by kind and content: same constant value, same
       * temp id, or both undefined. */
      for (unsigned i = 0; i < a->operands.size(); i++) {
         if (a->operands[i].isConstant()) {
            if (!b->operands[i].isConstant())
               return false;
            if (a->operands[i].constantValue() != b->operands[i].constantValue())
               return false;
         } else if (a->operands[i].isTemp()) {
            if (!b->operands[i].isTemp())
               return false;
            if (a->operands[i].tempId() != b->operands[i].tempId())
               return false;
         } else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
            return false;
         if (a->operands[i].isFixed()) {
            if (!b->operands[i].isFixed())
               return false;
            if (a->operands[i].physReg() != b->operands[i].physReg())
               return false;
            /* Operands fixed to exec read the execution mask: they are only
             * equal when computed under the same exec_id (stored in
             * pass_flags by process_block()). */
            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
               return false;
         }
      }

      /* Definitions are compared by register class and fixed register only,
       * never by temp id — the temp id is what CSE renames away. */
      for (unsigned i = 0; i < a->definitions.size(); i++) {
         if (a->definitions[i].isTemp()) {
            if (!b->definitions[i].isTemp())
               return false;
            if (a->definitions[i].regClass() != b->definitions[i].regClass())
               return false;
         }
         if (a->definitions[i].isFixed()) {
            if (!b->definitions[i].isFixed())
               return false;
            if (a->definitions[i].physReg() != b->definitions[i].physReg())
               return false;
            /* Definitions fixed to exec are never merged. */
            if (a->definitions[i].physReg() == exec)
               return false;
         }
      }

      if (a->isVALU()) {
         VALU_instruction& aV = a->valu();
         VALU_instruction& bV = b->valu();
         /* All VALU modifiers must match. */
         if (aV.abs != bV.abs || aV.neg != bV.neg || aV.clamp != bV.clamp || aV.omod != bV.omod ||
             aV.opsel != bV.opsel || aV.opsel_lo != bV.opsel_lo || aV.opsel_hi != bV.opsel_hi)
            return false;

         /* Lane-crossing instructions read other lanes' data, so they also
          * require the same exec_id (pass_flags). */
         if (a->opcode == aco_opcode::v_permlane16_b32 ||
             a->opcode == aco_opcode::v_permlanex16_b32 ||
             a->opcode == aco_opcode::v_permlane64_b32 ||
             a->opcode == aco_opcode::v_readfirstlane_b32)
            return aV.pass_flags == bV.pass_flags;
      }
      if (a->isDPP16()) {
         DPP16_instruction& aDPP = a->dpp16();
         DPP16_instruction& bDPP = b->dpp16();
         /* DPP also swizzles data between lanes, hence the pass_flags check. */
         return aDPP.pass_flags == bDPP.pass_flags && aDPP.dpp_ctrl == bDPP.dpp_ctrl &&
                aDPP.bank_mask == bDPP.bank_mask && aDPP.row_mask == bDPP.row_mask &&
                aDPP.bound_ctrl == bDPP.bound_ctrl && aDPP.fetch_inactive == bDPP.fetch_inactive;
      }
      if (a->isDPP8()) {
         DPP8_instruction& aDPP = a->dpp8();
         DPP8_instruction& bDPP = b->dpp8();
         return aDPP.pass_flags == bDPP.pass_flags && aDPP.lane_sel == bDPP.lane_sel &&
                aDPP.fetch_inactive == bDPP.fetch_inactive;
      }
      if (a->isSDWA()) {
         SDWA_instruction& aSDWA = a->sdwa();
         SDWA_instruction& bSDWA = b->sdwa();
         return aSDWA.sel[0] == bSDWA.sel[0] && aSDWA.sel[1] == bSDWA.sel[1] &&
                aSDWA.dst_sel == bSDWA.dst_sel;
      }

      /* Per-format comparison of the remaining encoded fields. */
      switch (a->format) {
      case Format::SOP1: {
         /* s_sendmsg_rtn results depend on external state — never merge. */
         if (a->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
             a->opcode == aco_opcode::s_sendmsg_rtn_b64)
            return false;
         return true;
      }
      case Format::SOPK: {
         /* s_getreg_b32 reads mutable hardware registers — never merge. */
         if (a->opcode == aco_opcode::s_getreg_b32)
            return false;
         SOPK_instruction& aK = a->sopk();
         SOPK_instruction& bK = b->sopk();
         return aK.imm == bK.imm;
      }
      case Format::SMEM: {
         SMEM_instruction& aS = a->smem();
         SMEM_instruction& bS = b->smem();
         return aS.sync == bS.sync && aS.glc == bS.glc && aS.dlc == bS.dlc && aS.nv == bS.nv &&
                aS.disable_wqm == bS.disable_wqm;
      }
      case Format::VINTRP: {
         VINTRP_instruction& aI = a->vintrp();
         VINTRP_instruction& bI = b->vintrp();
         if (aI.attribute != bI.attribute)
            return false;
         if (aI.component != bI.component)
            return false;
         return true;
      }
      case Format::VINTERP_INREG: {
         VINTERP_inreg_instruction& aI = a->vinterp_inreg();
         VINTERP_inreg_instruction& bI = b->vinterp_inreg();
         return aI.wait_exp == bI.wait_exp;
      }
      case Format::PSEUDO_REDUCTION: {
         Pseudo_reduction_instruction& aR = a->reduction();
         Pseudo_reduction_instruction& bR = b->reduction();
         return aR.pass_flags == bR.pass_flags && aR.reduce_op == bR.reduce_op &&
                aR.cluster_size == bR.cluster_size;
      }
      case Format::DS: {
         /* Only the lane-shuffle DS opcodes survive can_eliminate(). */
         assert(a->opcode == aco_opcode::ds_bpermute_b32 ||
                a->opcode == aco_opcode::ds_permute_b32 || a->opcode == aco_opcode::ds_swizzle_b32);
         DS_instruction& aD = a->ds();
         DS_instruction& bD = b->ds();
         return aD.sync == bD.sync && aD.pass_flags == bD.pass_flags && aD.gds == bD.gds &&
                aD.offset0 == bD.offset0 && aD.offset1 == bD.offset1;
      }
      case Format::LDSDIR: {
         LDSDIR_instruction& aD = a->ldsdir();
         LDSDIR_instruction& bD = b->ldsdir();
         return aD.sync == bD.sync && aD.attr == bD.attr && aD.attr_chan == bD.attr_chan &&
                aD.wait_vdst == bD.wait_vdst;
      }
      case Format::MTBUF: {
         MTBUF_instruction& aM = a->mtbuf();
         MTBUF_instruction& bM = b->mtbuf();
         return aM.sync == bM.sync && aM.dfmt == bM.dfmt && aM.nfmt == bM.nfmt &&
                aM.offset == bM.offset && aM.offen == bM.offen && aM.idxen == bM.idxen &&
                aM.glc == bM.glc && aM.dlc == bM.dlc && aM.slc == bM.slc && aM.tfe == bM.tfe &&
                aM.disable_wqm == bM.disable_wqm;
      }
      case Format::MUBUF: {
         MUBUF_instruction& aM = a->mubuf();
         MUBUF_instruction& bM = b->mubuf();
         return aM.sync == bM.sync && aM.offset == bM.offset && aM.offen == bM.offen &&
                aM.idxen == bM.idxen && aM.glc == bM.glc && aM.dlc == bM.dlc && aM.slc == bM.slc &&
                aM.tfe == bM.tfe && aM.lds == bM.lds && aM.disable_wqm == bM.disable_wqm;
      }
      case Format::MIMG: {
         MIMG_instruction& aM = a->mimg();
         MIMG_instruction& bM = b->mimg();
         return aM.sync == bM.sync && aM.dmask == bM.dmask && aM.unrm == bM.unrm &&
                aM.glc == bM.glc && aM.slc == bM.slc && aM.tfe == bM.tfe && aM.da == bM.da &&
                aM.lwe == bM.lwe && aM.r128 == bM.r128 && aM.a16 == bM.a16 && aM.d16 == bM.d16 &&
                aM.disable_wqm == bM.disable_wqm;
      }
      /* These formats are rejected by can_eliminate() and therefore never
       * enter the expression set. */
      case Format::FLAT:
      case Format::GLOBAL:
      case Format::SCRATCH:
      case Format::EXP:
      case Format::SOPP:
      case Format::PSEUDO_BRANCH:
      case Format::PSEUDO_BARRIER: unreachable("unsupported instruction format");
      default: return true;
      }
   }
};
290 
/* Maps an expression (keyed by the instruction's right-hand side) to the index
 * of the block where the defining instruction lives. */
using expr_set = aco::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;

/* Pass-wide state for value numbering. */
struct vn_ctx {
   Program* program;
   /* Arena backing both maps; released all at once with the context. */
   monotonic_buffer_resource m;
   expr_set expr_values;
   /* Maps an eliminated definition's temp id to the surviving temp. */
   aco::unordered_map<uint32_t, Temp> renames;

   /* The exec id should be the same on the same level of control flow depth.
    * Together with the check for dominator relations, it is safe to assume
    * that the same exec_id also means the same execution mask.
    * Discards increment the exec_id, so that it won't return to the previous value.
    */
   uint32_t exec_id = 1;

   vn_ctx(Program* program_) : program(program_), m(), expr_values(m), renames(m)
   {
      static_assert(sizeof(Temp) == 4, "Temp must fit in 32bits");
      /* Reserve one slot per instruction up front to avoid rehashing. */
      unsigned size = 0;
      for (Block& block : program->blocks)
         size += block.instructions.size();
      expr_values.reserve(size);
   }
};
315 
316 /* dominates() returns true if the parent block dominates the child block and
317  * if the parent block is part of the same loop or has a smaller loop nest depth.
318  */
319 bool
dominates(vn_ctx & ctx,uint32_t parent,uint32_t child)320 dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
321 {
322    unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
323    while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
324       child = ctx.program->blocks[child].logical_idom;
325 
326    return parent == child;
327 }
328 
329 /** Returns whether this instruction can safely be removed
330  *  and replaced by an equal expression.
331  *  This is in particular true for ALU instructions and
332  *  read-only memory instructions.
333  *
334  *  Note that expr_set must not be used with instructions
335  *  which cannot be eliminated.
336  */
337 bool
can_eliminate(aco_ptr<Instruction> & instr)338 can_eliminate(aco_ptr<Instruction>& instr)
339 {
340    switch (instr->format) {
341    case Format::FLAT:
342    case Format::GLOBAL:
343    case Format::SCRATCH:
344    case Format::EXP:
345    case Format::SOPP:
346    case Format::PSEUDO_BRANCH:
347    case Format::PSEUDO_BARRIER: return false;
348    case Format::DS:
349       return instr->opcode == aco_opcode::ds_bpermute_b32 ||
350              instr->opcode == aco_opcode::ds_permute_b32 ||
351              instr->opcode == aco_opcode::ds_swizzle_b32;
352    case Format::SMEM:
353    case Format::MUBUF:
354    case Format::MIMG:
355    case Format::MTBUF:
356       if (!get_sync_info(instr.get()).can_reorder())
357          return false;
358       break;
359    default: break;
360    }
361 
362    if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi ||
363        instr->opcode == aco_opcode::p_linear_phi ||
364        instr->opcode == aco_opcode::p_pops_gfx9_add_exiting_wave_id ||
365        instr->definitions[0].isNoCSE())
366       return false;
367 
368    return true;
369 }
370 
/* Value-numbers one block: renames operands using earlier eliminations,
 * copy-propagates trivial copies, and drops instructions whose expression was
 * already computed in a dominating block.
 */
void
process_block(vn_ctx& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> new_instructions;
   new_instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      /* first, rename operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         auto it = ctx.renames.find(op.tempId());
         if (it != ctx.renames.end())
            op.setTemp(it->second);
      }

      /* These change the execution mask, so bump exec_id to keep expressions
       * computed before and after from being merged. */
      if (instr->opcode == aco_opcode::p_discard_if ||
          instr->opcode == aco_opcode::p_demote_to_helper || instr->opcode == aco_opcode::p_end_wqm)
         ctx.exec_id++;

      if (!can_eliminate(instr)) {
         new_instructions.emplace_back(std::move(instr));
         continue;
      }

      /* simple copy-propagation through renaming */
      bool copy_instr =
         instr->opcode == aco_opcode::p_parallelcopy ||
         (instr->opcode == aco_opcode::p_create_vector && instr->operands.size() == 1);
      if (copy_instr && !instr->definitions[0].isFixed() && instr->operands[0].isTemp() &&
          instr->operands[0].regClass() == instr->definitions[0].regClass()) {
         ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
         continue;
      }

      /* Tag with the current exec_id so InstrHash/InstrPred only merge
       * expressions computed under the same execution mask. */
      instr->pass_flags = ctx.exec_id;
      std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);

      /* if there was already an expression with the same value number */
      if (!res.second) {
         Instruction* orig_instr = res.first->first;
         assert(instr->definitions.size() == orig_instr->definitions.size());
         /* check if the original instruction dominates the current one */
         if (dominates(ctx, res.first->second, block.index) &&
             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
            /* Drop this instruction: redirect all of its results to the
             * dominating instruction's temps. */
            for (unsigned i = 0; i < instr->definitions.size(); i++) {
               assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
               assert(instr->definitions[i].isTemp());
               ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
               /* The surviving definition inherits the stricter precision flag. */
               if (instr->definitions[i].isPrecise())
                  orig_instr->definitions[i].setPrecise(true);
               /* SPIR-V spec says that an instruction marked with NUW wrapping
                * around is undefined behaviour, so we can break additions in
                * other contexts.
                */
               if (instr->definitions[i].isNUW())
                  orig_instr->definitions[i].setNUW(true);
            }
         } else {
            /* The stored match doesn't dominate this block (or has an
             * incompatible FP mode): store this instruction instead so later
             * dominated blocks can match it. */
            ctx.expr_values.erase(res.first);
            ctx.expr_values.emplace(instr.get(), block.index);
            new_instructions.emplace_back(std::move(instr));
         }
      } else {
         new_instructions.emplace_back(std::move(instr));
      }
   }

   block.instructions = std::move(new_instructions);
}
441 
442 void
rename_phi_operands(Block & block,aco::unordered_map<uint32_t,Temp> & renames)443 rename_phi_operands(Block& block, aco::unordered_map<uint32_t, Temp>& renames)
444 {
445    for (aco_ptr<Instruction>& phi : block.instructions) {
446       if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
447          break;
448 
449       for (Operand& op : phi->operands) {
450          if (!op.isTemp())
451             continue;
452          auto it = renames.find(op.tempId());
453          if (it != renames.end())
454             op.setTemp(it->second);
455       }
456    }
457 }
458 } /* end namespace */
459 
/* Pass entry point: dominator-tree value numbering over the whole program.
 * Blocks are visited in order; exec_id is incremented when entering nested
 * control flow and decremented when leaving it, so that only expressions
 * computed under (provably) the same execution mask get merged.
 */
void
value_numbering(Program* program)
{
   vn_ctx ctx(program);
   std::vector<unsigned> loop_headers;

   for (Block& block : program->blocks) {
      assert(ctx.exec_id > 0);
      /* decrement exec_id when leaving nested control flow */
      if (block.kind & block_kind_loop_header)
         loop_headers.push_back(block.index);
      if (block.kind & block_kind_merge) {
         ctx.exec_id--;
      } else if (block.kind & block_kind_loop_exit) {
         /* Undo the per-edge increments accumulated inside the loop: one per
          * linear predecessor of the header and of the exit block.
          * NOTE(review): relies on each such predecessor edge having bumped
          * exec_id below — confirm against the block-kind invariants. */
         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
         ctx.exec_id -= block.linear_preds.size();
         loop_headers.pop_back();
      }

      /* A block that is its own logical dominator starts a fresh region:
       * no previously seen expression can dominate anything here. */
      if (block.logical_idom == (int)block.index)
         ctx.expr_values.clear();

      /* Logically unreachable blocks (idom == -1) only get their phi operands
       * renamed; their other instructions are left untouched. */
      if (block.logical_idom != -1)
         process_block(ctx, block);
      else
         rename_phi_operands(block, ctx.renames);

      /* increment exec_id when entering nested control flow */
      if (block.kind & block_kind_branch || block.kind & block_kind_loop_preheader ||
          block.kind & block_kind_break || block.kind & block_kind_continue)
         ctx.exec_id++;
      else if (block.kind & block_kind_continue_or_break)
         ctx.exec_id += 2;
   }

   /* rename loop header phi operands */
   for (Block& block : program->blocks) {
      if (block.kind & block_kind_loop_header)
         rename_phi_operands(block, ctx.renames);
   }
}
501 
502 } // namespace aco
503