/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <cstring> /* for memcpy in hash_murmur_32 */
#include <map>
#include <unordered_map>
#include "aco_ir.h"

/*
 * Implements the algorithm for dominator-tree value numbering
 * from "Value Numbering" by Briggs, Cooper, and Simpson.
 */
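
/* Overview of the implementation below: each instruction's right-hand side
 * (format, opcode, operands and encoding fields, but not its definitions)
 * is entered into a hash table together with the index of its block. When a
 * later instruction hashes and compares equal to an entry whose block
 * dominates the current one, the duplicate is dropped and its definitions
 * are renamed to the earlier instruction's temporaries.
 */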

namespace aco {
namespace {

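/* One mixing round of 32-bit Murmur3: folds a single word k into the
 * running hash value h. */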
inline
uint32_t murmur_32_scramble(uint32_t h, uint32_t k) {
   k *= 0xcc9e2d51;
   k = (k << 15) | (k >> 17);
   h ^= k * 0x1b873593;
   h = (h << 13) | (h >> 19);
   h = h * 5 + 0xe6546b64;
   return h;
}
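/* Hashes the right-hand side of an instruction of concrete type T: the
 * format/opcode pair, each operand's 32-bit payload, and the trailing bytes
 * of the instruction struct (skipping format, opcode and pass_flags). */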
template<typename T>
uint32_t hash_murmur_32(Instruction* instr)
{
   uint32_t hash = uint32_t(instr->format) << 16 | uint32_t(instr->opcode);

   for (const Operand& op : instr->operands)
      hash = murmur_32_scramble(hash, op.constantValue());

   /* skip format, opcode and pass_flags */
   for (unsigned i = 2; i < (sizeof(T) >> 2); i++) {
      uint32_t u;
      /* Accesses it through a byte array, so it doesn't violate the strict aliasing rules */
      memcpy(&u, reinterpret_cast<uint8_t *>(instr) + i * 4, 4);
      hash = murmur_32_scramble(hash, u);
   }

   /* Finalize. */
   uint32_t len = instr->operands.size() + instr->definitions.size() + sizeof(T);
   hash ^= len;
   hash ^= hash >> 16;
   hash *= 0x85ebca6b;
   hash ^= hash >> 13;
   hash *= 0xc2b2ae35;
   hash ^= hash >> 16;
   return hash;
}

struct InstrHash {
   /* This hash function uses the Murmur3 algorithm written by Austin Appleby
    * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
    *
    * In order to calculate the expression set, only the right-hand-side of an
    * instruction is used for the hash, i.e. everything except the definitions.
    */
   std::size_t operator()(Instruction* instr) const
   {
      if (instr->isVOP3())
         return hash_murmur_32<VOP3A_instruction>(instr);

      if (instr->isDPP())
         return hash_murmur_32<DPP_instruction>(instr);

      if (instr->isSDWA())
         return hash_murmur_32<SDWA_instruction>(instr);

      switch (instr->format) {
      case Format::SMEM:
         return hash_murmur_32<SMEM_instruction>(instr);
      case Format::VINTRP:
         return hash_murmur_32<Interp_instruction>(instr);
      case Format::DS:
         return hash_murmur_32<DS_instruction>(instr);
      case Format::SOPP:
         return hash_murmur_32<SOPP_instruction>(instr);
      case Format::SOPK:
         return hash_murmur_32<SOPK_instruction>(instr);
      case Format::EXP:
         return hash_murmur_32<Export_instruction>(instr);
      case Format::MUBUF:
         return hash_murmur_32<MUBUF_instruction>(instr);
      case Format::MIMG:
         return hash_murmur_32<MIMG_instruction>(instr);
      case Format::MTBUF:
         return hash_murmur_32<MTBUF_instruction>(instr);
      case Format::FLAT:
         return hash_murmur_32<FLAT_instruction>(instr);
      case Format::PSEUDO_BRANCH:
         return hash_murmur_32<Pseudo_branch_instruction>(instr);
      case Format::PSEUDO_REDUCTION:
         return hash_murmur_32<Pseudo_reduction_instruction>(instr);
      default:
         return hash_murmur_32<Instruction>(instr);
      }
   }
};
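/* Equality predicate matching the hash above: it compares only the
 * right-hand side, and deliberately reports "not equal" wherever replacing
 * one instruction with the other would be unsafe (e.g. non-reorderable
 * memory accesses, or results that depend on the exec mask). */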
struct InstrPred {
   bool operator()(Instruction* a, Instruction* b) const
   {
      if (a->format != b->format)
         return false;
      if (a->opcode != b->opcode)
         return false;
      if (a->operands.size() != b->operands.size() || a->definitions.size() != b->definitions.size())
         return false; /* possible with pseudo-instructions */
      for (unsigned i = 0; i < a->operands.size(); i++) {
         if (a->operands[i].isConstant()) {
            if (!b->operands[i].isConstant())
               return false;
            if (a->operands[i].constantValue() != b->operands[i].constantValue())
               return false;
         }
         else if (a->operands[i].isTemp()) {
            if (!b->operands[i].isTemp())
               return false;
            if (a->operands[i].tempId() != b->operands[i].tempId())
               return false;
         }
         else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
            return false;
         if (a->operands[i].isFixed()) {
            if (!b->operands[i].isFixed())
               return false;
            if (a->operands[i].physReg() != b->operands[i].physReg())
               return false;
            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
               return false;
         }
      }
      for (unsigned i = 0; i < a->definitions.size(); i++) {
         if (a->definitions[i].isTemp()) {
            if (!b->definitions[i].isTemp())
               return false;
            if (a->definitions[i].regClass() != b->definitions[i].regClass())
               return false;
         }
         if (a->definitions[i].isFixed()) {
            if (!b->definitions[i].isFixed())
               return false;
            if (a->definitions[i].physReg() != b->definitions[i].physReg())
               return false;
            if (a->definitions[i].physReg() == exec)
               return false;
         }
      }

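      /* v_readfirstlane_b32 returns the value of the first active lane, so
       * its result depends on the exec mask; pass_flags holds this pass's
       * exec_id (set in process_block below). */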
      if (a->opcode == aco_opcode::v_readfirstlane_b32)
         return a->pass_flags == b->pass_flags;

      /* The results of VOPC depend on the exec mask if used for subgroup operations. */
      if ((uint32_t) a->format & (uint32_t) Format::VOPC && a->pass_flags != b->pass_flags)
         return false;

      if (a->isVOP3()) {
         VOP3A_instruction* a3 = static_cast<VOP3A_instruction*>(a);
         VOP3A_instruction* b3 = static_cast<VOP3A_instruction*>(b);
         for (unsigned i = 0; i < 3; i++) {
            if (a3->abs[i] != b3->abs[i] ||
                a3->neg[i] != b3->neg[i])
               return false;
         }
         return a3->clamp == b3->clamp &&
                a3->omod == b3->omod &&
                a3->opsel == b3->opsel;
      }
      if (a->isDPP()) {
         DPP_instruction* aDPP = static_cast<DPP_instruction*>(a);
         DPP_instruction* bDPP = static_cast<DPP_instruction*>(b);
         return aDPP->pass_flags == bDPP->pass_flags &&
                aDPP->dpp_ctrl == bDPP->dpp_ctrl &&
                aDPP->bank_mask == bDPP->bank_mask &&
                aDPP->row_mask == bDPP->row_mask &&
                aDPP->bound_ctrl == bDPP->bound_ctrl &&
                aDPP->abs[0] == bDPP->abs[0] &&
                aDPP->abs[1] == bDPP->abs[1] &&
                aDPP->neg[0] == bDPP->neg[0] &&
                aDPP->neg[1] == bDPP->neg[1];
      }
      if (a->isSDWA()) {
         SDWA_instruction* aSDWA = static_cast<SDWA_instruction*>(a);
         SDWA_instruction* bSDWA = static_cast<SDWA_instruction*>(b);
         return aSDWA->sel[0] == bSDWA->sel[0] &&
                aSDWA->sel[1] == bSDWA->sel[1] &&
                aSDWA->dst_sel == bSDWA->dst_sel &&
                aSDWA->abs[0] == bSDWA->abs[0] &&
                aSDWA->abs[1] == bSDWA->abs[1] &&
                aSDWA->neg[0] == bSDWA->neg[0] &&
                aSDWA->neg[1] == bSDWA->neg[1] &&
                aSDWA->dst_preserve == bSDWA->dst_preserve &&
                aSDWA->clamp == bSDWA->clamp &&
                aSDWA->omod == bSDWA->omod;
      }

      switch (a->format) {
         case Format::SOPK: {
            if (a->opcode == aco_opcode::s_getreg_b32)
               return false;
            SOPK_instruction* aK = static_cast<SOPK_instruction*>(a);
            SOPK_instruction* bK = static_cast<SOPK_instruction*>(b);
            return aK->imm == bK->imm;
         }
         case Format::SMEM: {
            SMEM_instruction* aS = static_cast<SMEM_instruction*>(a);
            SMEM_instruction* bS = static_cast<SMEM_instruction*>(b);
            /* isel shouldn't be creating situations where this assertion fails */
            assert(aS->prevent_overflow == bS->prevent_overflow);
            return aS->sync.can_reorder() && bS->sync.can_reorder() &&
                   aS->sync == bS->sync && aS->glc == bS->glc && aS->dlc == bS->dlc &&
                   aS->nv == bS->nv && aS->disable_wqm == bS->disable_wqm &&
                   aS->prevent_overflow == bS->prevent_overflow;
         }
         case Format::VINTRP: {
            Interp_instruction* aI = static_cast<Interp_instruction*>(a);
            Interp_instruction* bI = static_cast<Interp_instruction*>(b);
            if (aI->attribute != bI->attribute)
               return false;
            if (aI->component != bI->component)
               return false;
            return true;
         }
         case Format::PSEUDO_REDUCTION: {
            Pseudo_reduction_instruction *aR = static_cast<Pseudo_reduction_instruction*>(a);
            Pseudo_reduction_instruction *bR = static_cast<Pseudo_reduction_instruction*>(b);
            return aR->pass_flags == bR->pass_flags &&
                   aR->reduce_op == bR->reduce_op &&
                   aR->cluster_size == bR->cluster_size;
         }
         case Format::MTBUF: {
            MTBUF_instruction* aM = static_cast<MTBUF_instruction *>(a);
            MTBUF_instruction* bM = static_cast<MTBUF_instruction *>(b);
            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
                   aM->sync == bM->sync &&
                   aM->dfmt == bM->dfmt &&
                   aM->nfmt == bM->nfmt &&
                   aM->offset == bM->offset &&
                   aM->offen == bM->offen &&
                   aM->idxen == bM->idxen &&
                   aM->glc == bM->glc &&
                   aM->dlc == bM->dlc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         case Format::MUBUF: {
            MUBUF_instruction* aM = static_cast<MUBUF_instruction *>(a);
            MUBUF_instruction* bM = static_cast<MUBUF_instruction *>(b);
            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
                   aM->sync == bM->sync &&
                   aM->offset == bM->offset &&
                   aM->offen == bM->offen &&
                   aM->idxen == bM->idxen &&
                   aM->glc == bM->glc &&
                   aM->dlc == bM->dlc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->lds == bM->lds &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         /* we want to optimize these in NIR and don't want to deal with load-store dependencies here */
         case Format::FLAT:
         case Format::GLOBAL:
         case Format::SCRATCH:
         case Format::EXP:
         case Format::SOPP:
         case Format::PSEUDO_BRANCH:
         case Format::PSEUDO_BARRIER:
            return false;
         case Format::DS: {
            if (a->opcode != aco_opcode::ds_bpermute_b32 &&
                a->opcode != aco_opcode::ds_permute_b32 &&
                a->opcode != aco_opcode::ds_swizzle_b32)
               return false;
            DS_instruction* aD = static_cast<DS_instruction *>(a);
            DS_instruction* bD = static_cast<DS_instruction *>(b);
            return aD->sync.can_reorder() && bD->sync.can_reorder() &&
                   aD->sync == bD->sync &&
                   aD->pass_flags == bD->pass_flags &&
                   aD->gds == bD->gds &&
                   aD->offset0 == bD->offset0 &&
                   aD->offset1 == bD->offset1;
         }
         case Format::MIMG: {
            MIMG_instruction* aM = static_cast<MIMG_instruction*>(a);
            MIMG_instruction* bM = static_cast<MIMG_instruction*>(b);
            return aM->sync.can_reorder() && bM->sync.can_reorder() &&
                   aM->sync == bM->sync &&
                   aM->dmask == bM->dmask &&
                   aM->unrm == bM->unrm &&
                   aM->glc == bM->glc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->da == bM->da &&
                   aM->lwe == bM->lwe &&
                   aM->r128 == bM->r128 &&
                   aM->a16 == bM->a16 &&
                   aM->d16 == bM->d16 &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         default:
            return true;
      }
   }
};

using expr_set = std::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;
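/* Maps each distinct right-hand side to the index of the block where it was
 * first seen. For example (illustrative IR), after "%5 = v_add_f32 %1, %2"
 * in block 3, a later identical v_add_f32 hashes to the same entry and
 * finds block index 3 there. */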

struct vn_ctx {
   Program* program;
   expr_set expr_values;
   std::map<uint32_t, Temp> renames;

   /* The exec id should be the same on the same level of control flow depth.
    * Together with the check for dominator relations, it is safe to assume
    * that the same exec_id also means the same execution mask.
    * Discards increment the exec_id, so that it won't return to the previous value.
    */
   uint32_t exec_id = 1;

   vn_ctx(Program* program) : program(program) {
      static_assert(sizeof(Temp) == 4, "Temp must fit in 32 bits");
      unsigned size = 0;
      for (Block& block : program->blocks)
         size += block.instructions.size();
      expr_values.reserve(size);
   }
};


/* dominates() returns true if the parent block dominates the child block and
 * if the parent block is part of the same loop or has a smaller loop nest depth.
 */
bool dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
{
   unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
   while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
      child = ctx.program->blocks[child].logical_idom;

   return parent == child;
}

void process_block(vn_ctx& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> new_instructions;
   new_instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      /* first, rename operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         auto it = ctx.renames.find(op.tempId());
         if (it != ctx.renames.end())
            op.setTemp(it->second);
      }

      if (instr->opcode == aco_opcode::p_discard_if ||
          instr->opcode == aco_opcode::p_demote_to_helper)
         ctx.exec_id++;

      if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi) {
         new_instructions.emplace_back(std::move(instr));
         continue;
      }

      /* simple copy-propagation through renaming */
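      /* For example, "%b = p_parallelcopy %a" with matching register classes
       * and no fixed destination just records the rename %b -> %a below. */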
      bool copy_instr = instr->opcode == aco_opcode::p_parallelcopy ||
                        (instr->opcode == aco_opcode::p_create_vector && instr->operands.size() == 1);
      if (copy_instr && !instr->definitions[0].isFixed() && instr->operands[0].isTemp() &&
          instr->operands[0].regClass() == instr->definitions[0].regClass()) {
         ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
         continue;
      }

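      /* Tag the instruction with the current exec_id so InstrPred can refuse
       * to merge exec-sensitive instructions across execution-mask changes. */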
      instr->pass_flags = ctx.exec_id;
      std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);

      /* if there was already an expression with the same value number */
      if (!res.second) {
         Instruction* orig_instr = res.first->first;
         assert(instr->definitions.size() == orig_instr->definitions.size());
         /* check if the original instruction dominates the current one */
         if (dominates(ctx, res.first->second, block.index) &&
             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
            for (unsigned i = 0; i < instr->definitions.size(); i++) {
               assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
               assert(instr->definitions[i].isTemp());
               ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
               if (instr->definitions[i].isPrecise())
                  orig_instr->definitions[i].setPrecise(true);
               /* The SPIR-V spec says that it is undefined behaviour for an
                * instruction marked NUW to wrap around, so we can break
                * additions in other contexts.
                */
               if (instr->definitions[i].isNUW())
                  orig_instr->definitions[i].setNUW(true);
            }
         } else {
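            /* The original instruction doesn't dominate this one (or the FP
             * modes don't allow reuse), so register this instruction as the
             * new representative of the expression instead. */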
            ctx.expr_values.erase(res.first);
            ctx.expr_values.emplace(instr.get(), block.index);
            new_instructions.emplace_back(std::move(instr));
         }
      } else {
         new_instructions.emplace_back(std::move(instr));
      }
   }

   block.instructions = std::move(new_instructions);
}

void rename_phi_operands(Block& block, std::map<uint32_t, Temp>& renames)
{
   for (aco_ptr<Instruction>& phi : block.instructions) {
      if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
         break;

      for (Operand& op : phi->operands) {
         if (!op.isTemp())
            continue;
         auto it = renames.find(op.tempId());
         if (it != renames.end())
            op.setTemp(it->second);
      }
   }
}
} /* end namespace */


void value_numbering(Program* program)
{
   vn_ctx ctx(program);
   std::vector<unsigned> loop_headers;

   for (Block& block : program->blocks) {
      assert(ctx.exec_id > 0);
      /* decrement exec_id when leaving nested control flow */
      if (block.kind & block_kind_loop_header)
         loop_headers.push_back(block.index);
      if (block.kind & block_kind_merge) {
         ctx.exec_id--;
      } else if (block.kind & block_kind_loop_exit) {
         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
         ctx.exec_id -= block.linear_preds.size();
         loop_headers.pop_back();
      }

      if (block.logical_idom != -1)
         process_block(ctx, block);
      else
         rename_phi_operands(block, ctx.renames);

      /* increment exec_id when entering nested control flow */
      if (block.kind & block_kind_branch ||
          block.kind & block_kind_loop_preheader ||
          block.kind & block_kind_break ||
          block.kind & block_kind_continue ||
          block.kind & block_kind_discard)
         ctx.exec_id++;
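      /* continue_or_break blocks act as both a continue and a break, so they
       * count as entering two levels of nested control flow. */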
      else if (block.kind & block_kind_continue_or_break)
         ctx.exec_id += 2;
   }

   /* rename loop header phi operands */
   for (Block& block : program->blocks) {
      if (block.kind & block_kind_loop_header)
         rename_phi_operands(block, ctx.renames);
   }
}

}