//===- GVNSink.cpp - sink expressions into successors ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file GVNSink.cpp
/// This pass attempts to sink instructions into successors, reducing static
/// instruction count and enabling if-conversion.
///
/// We use a variant of global value numbering to decide what can be sunk.
/// Consider:
///
/// [ %a1 = add i32 %b, 1 ]   [ %c1 = add i32 %d, 1 ]
/// [ %a2 = xor i32 %a1, 1 ]  [ %c2 = xor i32 %c1, 1 ]
///                  \           /
///            [ %e = phi i32 %a2, %c2 ]
///            [ add i32 %e, 4         ]
///
/// GVN would number %a1 and %c1 differently because they compute different
/// results - the VN of an instruction is a function of its opcode and the
/// transitive closure of its operands. This is the key property for hoisting
/// and CSE.
///
/// What we want when sinking, however, is a numbering that is a function of
/// the *uses* of an instruction, which allows us to answer the question "if I
/// replace %a1 with %c1, will it contribute in an equivalent way to all
/// successive instructions?". The PostValueTable class in GVN provides this
/// mapping.
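///
/// For illustration (the sunk value names below are hypothetical), sinking
/// both instruction pairs in the example above would yield roughly:
///
///     [ ... %b ... ]    [ ... %d ... ]
///             \            /
///        [ %p  = phi i32 %b, %d ]
///        [ %s1 = add i32 %p, 1  ]
///        [ %s2 = xor i32 %s1, 1 ]
///        [ add i32 %s2, 4       ]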
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "gvn-sink"

STATISTIC(NumRemoved, "Number of instructions removed");

namespace llvm {
namespace GVNExpression {

LLVM_DUMP_METHOD void Expression::dump() const {
  print(dbgs());
  dbgs() << "\n";
}

} // end namespace GVNExpression
} // end namespace llvm

namespace {

static bool isMemoryInst(const Instruction *I) {
  return isa<LoadInst>(I) || isa<StoreInst>(I) ||
         (isa<InvokeInst>(I) && !cast<InvokeInst>(I)->doesNotAccessMemory()) ||
         (isa<CallInst>(I) && !cast<CallInst>(I)->doesNotAccessMemory());
}

/// Iterates through instructions in a set of blocks in reverse order from the
/// first non-terminator. For example (assume all blocks have size n):
///   LockstepReverseIterator I([B1, B2, B3]);
///   *I-- = [B1[n], B2[n], B3[n]];
///   *I-- = [B1[n-1], B2[n-1], B3[n-1]];
///   *I-- = [B1[n-2], B2[n-2], B3[n-2]];
///   ...
///
/// It continues until all blocks have been exhausted. Use \c getActiveBlocks()
/// to determine which blocks are still going and the order they appear in the
/// list returned by operator*.
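///
/// A minimal usage sketch, mirroring the driver loop in sinkBB() below:
/// \code
///   LockstepReverseIterator LRI(Preds);
///   while (LRI.isValid()) {
///     ArrayRef<Instruction *> Insts = *LRI; // One instruction per block.
///     // ... analyze Insts, bail out or record a candidate ...
///     --LRI;
///   }
/// \endcode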
class LockstepReverseIterator {
  ArrayRef<BasicBlock *> Blocks;
  SmallSetVector<BasicBlock *, 4> ActiveBlocks;
  SmallVector<Instruction *, 4> Insts;
  bool Fail;

public:
  LockstepReverseIterator(ArrayRef<BasicBlock *> Blocks) : Blocks(Blocks) {
    reset();
  }

  void reset() {
    Fail = false;
    ActiveBlocks.clear();
    for (BasicBlock *BB : Blocks)
      ActiveBlocks.insert(BB);
    Insts.clear();
    for (BasicBlock *BB : Blocks) {
      if (BB->size() <= 1) {
        // Block wasn't big enough - only contained a terminator.
        ActiveBlocks.remove(BB);
        continue;
      }
      Insts.push_back(BB->getTerminator()->getPrevNode());
    }
    if (Insts.empty())
      Fail = true;
  }

  bool isValid() const { return !Fail; }
  ArrayRef<Instruction *> operator*() const { return Insts; }

  // Note: This needs to return a SmallSetVector as the elements of
  // ActiveBlocks will be later copied to Blocks using std::copy. The
  // resultant order of elements in Blocks needs to be deterministic.
  // Using SmallPtrSet instead causes non-deterministic order while
  // copying. And we cannot simply sort Blocks as they need to match the
  // corresponding Values.
  SmallSetVector<BasicBlock *, 4> &getActiveBlocks() { return ActiveBlocks; }

  void restrictToBlocks(SmallSetVector<BasicBlock *, 4> &Blocks) {
    for (auto II = Insts.begin(); II != Insts.end();) {
      if (std::find(Blocks.begin(), Blocks.end(), (*II)->getParent()) ==
          Blocks.end()) {
        ActiveBlocks.remove((*II)->getParent());
        II = Insts.erase(II);
      } else {
        ++II;
      }
    }
  }

  void operator--() {
    if (Fail)
      return;
    SmallVector<Instruction *, 4> NewInsts;
    for (auto *Inst : Insts) {
      if (Inst == &Inst->getParent()->front())
        ActiveBlocks.remove(Inst->getParent());
      else
        NewInsts.push_back(Inst->getPrevNode());
    }
    if (NewInsts.empty()) {
      Fail = true;
      return;
    }
    Insts = NewInsts;
  }
};

//===----------------------------------------------------------------------===//

/// Candidate solution for sinking. There may be different ways to
/// sink instructions, differing in the number of instructions sunk,
/// the number of predecessors sunk from and the number of PHIs
/// required.
struct SinkingInstructionCandidate {
  unsigned NumBlocks;
  unsigned NumInstructions;
  unsigned NumPHIs;
  unsigned NumMemoryInsts;
  int Cost = -1;
  SmallVector<BasicBlock *, 4> Blocks;

  void calculateCost(unsigned NumOrigPHIs, unsigned NumOrigBlocks) {
    unsigned NumExtraPHIs = NumPHIs - NumOrigPHIs;
    unsigned SplitEdgeCost = (NumOrigBlocks > NumBlocks) ? 2 : 0;
    Cost = (NumInstructions * (NumBlocks - 1)) -
           (NumExtraPHIs *
            NumExtraPHIs) // PHIs are expensive, so make sure they're worth it.
           - SplitEdgeCost;
  }
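
  // For intuition: sinking three instructions from two blocks at the price of
  // one extra PHI and no edge split gives Cost = 3 * (2 - 1) - 1 * 1 - 0 = 2.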

  bool operator>(const SinkingInstructionCandidate &Other) const {
    return Cost > Other.Cost;
  }
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &OS, const SinkingInstructionCandidate &C) {
  OS << "<Candidate Cost=" << C.Cost << " #Blocks=" << C.NumBlocks
     << " #Insts=" << C.NumInstructions << " #PHIs=" << C.NumPHIs << ">";
  return OS;
}
#endif

//===----------------------------------------------------------------------===//

/// Describes a PHI node that may or may not exist. These track the PHIs
/// that must be created if we sunk a sequence of instructions. It provides
/// a hash function for efficient equality comparisons.
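///
/// For illustration (hypothetical IR): if %x is to be sunk from %bb1 and %y
/// from %bb2, the operand PHI that would have to be created is modelled as
/// \code
///   phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
/// \endcode
/// i.e. Values = [%x, %y] paired element-wise with Blocks = [%bb1, %bb2].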
class ModelledPHI {
  SmallVector<Value *, 4> Values;
  SmallVector<BasicBlock *, 4> Blocks;

public:
  ModelledPHI() = default;

  ModelledPHI(const PHINode *PN) {
    // BasicBlock comes first so we sort by basic block pointer order,
    // then by value pointer order.
    SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
      Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
    llvm::sort(Ops);
    for (auto &P : Ops) {
      Blocks.push_back(P.first);
      Values.push_back(P.second);
    }
  }

  /// Create a dummy ModelledPHI that will compare unequal to any other
  /// ModelledPHI without the same ID.
  /// \note This is specifically for DenseMapInfo - do not use this!
  static ModelledPHI createDummy(size_t ID) {
    ModelledPHI M;
    M.Values.push_back(reinterpret_cast<Value *>(ID));
    return M;
  }

  /// Create a PHI from an array of incoming values and incoming blocks.
  template <typename VArray, typename BArray>
  ModelledPHI(const VArray &V, const BArray &B) {
    llvm::copy(V, std::back_inserter(Values));
    llvm::copy(B, std::back_inserter(Blocks));
  }

  /// Create a PHI from [I[OpNum] for I in Insts].
  template <typename BArray>
  ModelledPHI(ArrayRef<Instruction *> Insts, unsigned OpNum, const BArray &B) {
    llvm::copy(B, std::back_inserter(Blocks));
    for (auto *I : Insts)
      Values.push_back(I->getOperand(OpNum));
  }

  /// Restrict the PHI's contents down to only \c NewBlocks.
  /// \c NewBlocks must be a subset of \c this->Blocks.
  void restrictToBlocks(const SmallSetVector<BasicBlock *, 4> &NewBlocks) {
    auto BI = Blocks.begin();
    auto VI = Values.begin();
    while (BI != Blocks.end()) {
      assert(VI != Values.end());
      if (std::find(NewBlocks.begin(), NewBlocks.end(), *BI) ==
          NewBlocks.end()) {
        BI = Blocks.erase(BI);
        VI = Values.erase(VI);
      } else {
        ++BI;
        ++VI;
      }
    }
    assert(Blocks.size() == NewBlocks.size());
  }

  ArrayRef<Value *> getValues() const { return Values; }

  bool areAllIncomingValuesSame() const {
    return llvm::all_of(Values, [&](Value *V) { return V == Values[0]; });
  }

  bool areAllIncomingValuesSameType() const {
    return llvm::all_of(
        Values, [&](Value *V) { return V->getType() == Values[0]->getType(); });
  }

  bool areAnyIncomingValuesConstant() const {
    return llvm::any_of(Values, [&](Value *V) { return isa<Constant>(V); });
  }

  // Hash functor
  unsigned hash() const {
    return (unsigned)hash_combine_range(Values.begin(), Values.end());
  }

  bool operator==(const ModelledPHI &Other) const {
    return Values == Other.Values && Blocks == Other.Blocks;
  }
};

template <typename ModelledPHI> struct DenseMapInfo {
  static inline ModelledPHI &getEmptyKey() {
    static ModelledPHI Dummy = ModelledPHI::createDummy(0);
    return Dummy;
  }

  static inline ModelledPHI &getTombstoneKey() {
    static ModelledPHI Dummy = ModelledPHI::createDummy(1);
    return Dummy;
  }

  static unsigned getHashValue(const ModelledPHI &V) { return V.hash(); }

  static bool isEqual(const ModelledPHI &LHS, const ModelledPHI &RHS) {
    return LHS == RHS;
  }
};

using ModelledPHISet = DenseSet<ModelledPHI, DenseMapInfo<ModelledPHI>>;

//===----------------------------------------------------------------------===//
// ValueTable
//===----------------------------------------------------------------------===//
// This is a value number table where the value number is a function of the
// *uses* of a value, rather than its operands. Thus, if VN(A) == VN(B) we know
// that the program would be equivalent if we replaced A with PHI(A, B).
//===----------------------------------------------------------------------===//
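//
// For instance, in the example in the file header, %a1 and %c1 receive the
// same value number: each is used in the same way by an equivalent xor, and
// those xors in turn feed the same PHI, so replacing %a1 with PHI(%a1, %c1)
// would leave the program equivalent.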

/// A GVN expression describing how an instruction is used. The operands
/// field of BasicExpression is used to store uses, not operands.
///
/// This class also contains fields for discriminators used when determining
/// equivalence of instructions with side effects.
class InstructionUseExpr : public GVNExpression::BasicExpression {
  unsigned MemoryUseOrder = -1;
  bool Volatile = false;

public:
  InstructionUseExpr(Instruction *I, ArrayRecycler<Value *> &R,
                     BumpPtrAllocator &A)
      : GVNExpression::BasicExpression(I->getNumUses()) {
    allocateOperands(R, A);
    setOpcode(I->getOpcode());
    setType(I->getType());

    for (auto &U : I->uses())
      op_push_back(U.getUser());
    llvm::sort(op_begin(), op_end());
  }

  void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
  void setVolatile(bool V) { Volatile = V; }

  hash_code getHashValue() const override {
    return hash_combine(GVNExpression::BasicExpression::getHashValue(),
                        MemoryUseOrder, Volatile);
  }

  template <typename Function> hash_code getHashValue(Function MapFn) {
    hash_code H =
        hash_combine(getOpcode(), getType(), MemoryUseOrder, Volatile);
    for (auto *V : operands())
      H = hash_combine(H, MapFn(V));
    return H;
  }
};

class ValueTable {
  DenseMap<Value *, uint32_t> ValueNumbering;
  DenseMap<GVNExpression::Expression *, uint32_t> ExpressionNumbering;
  DenseMap<size_t, uint32_t> HashNumbering;
  BumpPtrAllocator Allocator;
  ArrayRecycler<Value *> Recycler;
  uint32_t nextValueNumber = 1;

  /// Create an expression for I based on its opcode and its uses. If I
  /// touches or reads memory, the expression is also based upon its memory
  /// order - see \c getMemoryUseOrder().
  InstructionUseExpr *createExpr(Instruction *I) {
    InstructionUseExpr *E =
        new (Allocator) InstructionUseExpr(I, Recycler, Allocator);
    if (isMemoryInst(I))
      E->setMemoryUseOrder(getMemoryUseOrder(I));

    if (CmpInst *C = dyn_cast<CmpInst>(I)) {
      CmpInst::Predicate Predicate = C->getPredicate();
      E->setOpcode((C->getOpcode() << 8) | Predicate);
    }
    return E;
  }

  /// Helper to compute the value number for a memory instruction
  /// (LoadInst/StoreInst), including checking the memory ordering and
  /// volatility.
  template <class Inst> InstructionUseExpr *createMemoryExpr(Inst *I) {
    if (isStrongerThanUnordered(I->getOrdering()) || I->isAtomic())
      return nullptr;
    InstructionUseExpr *E = createExpr(I);
    E->setVolatile(I->isVolatile());
    return E;
  }
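
  // Note: returning nullptr above makes lookupOrAdd() fall back to assigning
  // a fresh, unique value number, so atomic or ordered accesses are never
  // considered equivalent to anything else.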

public:
  ValueTable() = default;

  /// Returns the value number for the specified value, assigning
  /// it a new number if it did not have one before.
  uint32_t lookupOrAdd(Value *V) {
    auto VI = ValueNumbering.find(V);
    if (VI != ValueNumbering.end())
      return VI->second;

    if (!isa<Instruction>(V)) {
      ValueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
    }

    Instruction *I = cast<Instruction>(V);
    InstructionUseExpr *exp = nullptr;
    switch (I->getOpcode()) {
    case Instruction::Load:
      exp = createMemoryExpr(cast<LoadInst>(I));
      break;
    case Instruction::Store:
      exp = createMemoryExpr(cast<StoreInst>(I));
      break;
    case Instruction::Call:
    case Instruction::Invoke:
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    default:
      break;
    }

    if (!exp) {
      ValueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
    }

    uint32_t e = ExpressionNumbering[exp];
    if (!e) {
      hash_code H = exp->getHashValue([=](Value *V) { return lookupOrAdd(V); });
      auto I = HashNumbering.find(H);
      if (I != HashNumbering.end()) {
        e = I->second;
      } else {
        e = nextValueNumber++;
        HashNumbering[H] = e;
        ExpressionNumbering[exp] = e;
      }
    }
    ValueNumbering[V] = e;
    return e;
  }

  /// Returns the value number of the specified value. Fails if the value has
  /// not yet been numbered.
  uint32_t lookup(Value *V) const {
    auto VI = ValueNumbering.find(V);
    assert(VI != ValueNumbering.end() && "Value not numbered?");
    return VI->second;
  }

  /// Removes all value numberings and resets the value table.
  void clear() {
    ValueNumbering.clear();
    ExpressionNumbering.clear();
    HashNumbering.clear();
    Recycler.clear(Allocator);
    nextValueNumber = 1;
  }

  /// \c Inst uses or touches memory. Return an ID describing the memory state
  /// at \c Inst such that if getMemoryUseOrder(I1) == getMemoryUseOrder(I2),
  /// the exact same memory operations happen after I1 and I2.
  ///
  /// This is a very hard problem in general, so we use domain-specific
  /// knowledge that we only ever check for equivalence between blocks sharing
  /// a single immediate successor that is common, and when determining if
  /// I1 == I2 we will have already determined that next(I1) == next(I2). This
  /// inductive property allows us to simply return the value number of the
  /// next instruction that defines memory.
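  ///
  /// For illustration (hypothetical IR), in
  /// \code
  ///   store i32 %x, i32* %p   ; I1
  ///   store i32 %y, i32* %q   ; I2
  ///   br label %succ
  /// \endcode
  /// getMemoryUseOrder(I1) is the value number of I2, while
  /// getMemoryUseOrder(I2) is 0 because no memory-defining instruction
  /// follows it before the terminator.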
  uint32_t getMemoryUseOrder(Instruction *Inst) {
    auto *BB = Inst->getParent();
    for (auto I = std::next(Inst->getIterator()), E = BB->end();
         I != E && !I->isTerminator(); ++I) {
      if (!isMemoryInst(&*I))
        continue;
      if (isa<LoadInst>(&*I))
        continue;
      CallInst *CI = dyn_cast<CallInst>(&*I);
      if (CI && CI->onlyReadsMemory())
        continue;
      InvokeInst *II = dyn_cast<InvokeInst>(&*I);
      if (II && II->onlyReadsMemory())
        continue;
      return lookupOrAdd(&*I);
    }
    return 0;
  }
};

//===----------------------------------------------------------------------===//

class GVNSink {
public:
  GVNSink() = default;

  bool run(Function &F) {
    LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
                      << "\n");

    unsigned NumSunk = 0;
    ReversePostOrderTraversal<Function*> RPOT(&F);
    for (auto *N : RPOT)
      NumSunk += sinkBB(N);

    return NumSunk > 0;
  }

private:
  ValueTable VN;

  bool isInstructionBlacklisted(Instruction *I) {
    // These instructions may change or break semantics if moved.
    if (isa<PHINode>(I) || I->isEHPad() || isa<AllocaInst>(I) ||
        I->getType()->isTokenTy())
      return true;
    return false;
  }

  /// The main heuristic function. Analyze the set of instructions pointed to
  /// by LRI and return a candidate solution if these instructions can be
  /// sunk, or None otherwise.
  Optional<SinkingInstructionCandidate> analyzeInstructionForSinking(
      LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
      ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents);

  /// Create a ModelledPHI for each PHI in BB, adding to PHIs.
  void analyzeInitialPHIs(BasicBlock *BB, ModelledPHISet &PHIs,
                          SmallPtrSetImpl<Value *> &PHIContents) {
    for (PHINode &PN : BB->phis()) {
      auto MPHI = ModelledPHI(&PN);
      PHIs.insert(MPHI);
      for (auto *V : MPHI.getValues())
        PHIContents.insert(V);
    }
  }

  /// The main instruction sinking driver. Set up state and try and sink
  /// instructions into BBEnd from its predecessors.
  unsigned sinkBB(BasicBlock *BBEnd);

  /// Perform the actual mechanics of sinking an instruction from Blocks into
  /// BBEnd, which is their only successor.
  void sinkLastInstruction(ArrayRef<BasicBlock *> Blocks, BasicBlock *BBEnd);

  /// Remove PHIs that all have the same incoming value.
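  /// For example, \c phi i32 [ %v, %bb1 ], [ %v, %bb2 ] is replaced by \c %v;
  /// a degenerate PHI whose only incoming value is itself is replaced by
  /// undef.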
  void foldPointlessPHINodes(BasicBlock *BB) {
    auto I = BB->begin();
    while (PHINode *PN = dyn_cast<PHINode>(I++)) {
      if (!llvm::all_of(PN->incoming_values(), [&](const Value *V) {
            return V == PN->getIncomingValue(0);
          }))
        continue;
      if (PN->getIncomingValue(0) != PN)
        PN->replaceAllUsesWith(PN->getIncomingValue(0));
      else
        PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }
};

Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
    LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
    ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) {
  auto Insts = *LRI;
  LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I
                                                                  : Insts) {
    I->dump();
  } dbgs() << " ]\n";);

  DenseMap<uint32_t, unsigned> VNums;
  for (auto *I : Insts) {
    uint32_t N = VN.lookupOrAdd(I);
    LLVM_DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n");
    if (N == ~0U)
      return None;
    VNums[N]++;
  }
  unsigned VNumToSink =
      std::max_element(VNums.begin(), VNums.end(),
                       [](const std::pair<uint32_t, unsigned> &I,
                          const std::pair<uint32_t, unsigned> &J) {
                         return I.second < J.second;
                       })
          ->first;

  if (VNums[VNumToSink] == 1)
    // Can't sink anything!
    return None;

  // Now restrict the number of incoming blocks down to only those with
  // VNumToSink.
  auto &ActivePreds = LRI.getActiveBlocks();
  unsigned InitialActivePredSize = ActivePreds.size();
  SmallVector<Instruction *, 4> NewInsts;
  for (auto *I : Insts) {
    if (VN.lookup(I) != VNumToSink)
      ActivePreds.remove(I->getParent());
    else
      NewInsts.push_back(I);
  }
  for (auto *I : NewInsts)
    if (isInstructionBlacklisted(I))
      return None;

  // If we've restricted the incoming blocks, restrict all needed PHIs also
  // to that set.
  bool RecomputePHIContents = false;
  if (ActivePreds.size() != InitialActivePredSize) {
    ModelledPHISet NewNeededPHIs;
    for (auto P : NeededPHIs) {
      P.restrictToBlocks(ActivePreds);
      NewNeededPHIs.insert(P);
    }
    NeededPHIs = NewNeededPHIs;
    LRI.restrictToBlocks(ActivePreds);
    RecomputePHIContents = true;
  }

  // The sunk instruction's results.
  ModelledPHI NewPHI(NewInsts, ActivePreds);

  // Does sinking this instruction render previous PHIs redundant?
  if (NeededPHIs.find(NewPHI) != NeededPHIs.end()) {
    NeededPHIs.erase(NewPHI);
    RecomputePHIContents = true;
  }

  if (RecomputePHIContents) {
    // The needed PHIs have changed, so recompute the set of all needed
    // values.
    PHIContents.clear();
    for (auto &PHI : NeededPHIs)
      PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
  }

  // Is this instruction required by a later PHI that doesn't match this PHI?
  // If so, we can't sink this instruction.
  for (auto *V : NewPHI.getValues())
    if (PHIContents.count(V))
      // V exists in this PHI, but the whole PHI is different to NewPHI
      // (else it would have been removed earlier). We cannot continue
      // because this isn't representable.
      return None;

  // Which operands need PHIs?
  // FIXME: If any of these fail, we should partition up the candidates to
  // try and continue making progress.
  Instruction *I0 = NewInsts[0];

  // If all instructions that are going to participate don't have the same
  // number of operands, we can't do any useful PHI analysis for all operands.
  auto hasDifferentNumOperands = [&I0](Instruction *I) {
    return I->getNumOperands() != I0->getNumOperands();
  };
  if (any_of(NewInsts, hasDifferentNumOperands))
    return None;

  for (unsigned OpNum = 0, E = I0->getNumOperands(); OpNum != E; ++OpNum) {
    ModelledPHI PHI(NewInsts, OpNum, ActivePreds);
    if (PHI.areAllIncomingValuesSame())
      continue;
    if (!canReplaceOperandWithVariable(I0, OpNum))
      // We can't create a PHI from this instruction!
      return None;
    if (NeededPHIs.count(PHI))
      continue;
    if (!PHI.areAllIncomingValuesSameType())
      return None;
    // Don't create indirect calls! The called value is the final operand.
    if ((isa<CallInst>(I0) || isa<InvokeInst>(I0)) && OpNum == E - 1 &&
        PHI.areAnyIncomingValuesConstant())
      return None;

    NeededPHIs.reserve(NeededPHIs.size());
    NeededPHIs.insert(PHI);
    PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
  }

  if (isMemoryInst(NewInsts[0]))
    ++MemoryInstNum;

  SinkingInstructionCandidate Cand;
  Cand.NumInstructions = ++InstNum;
  Cand.NumMemoryInsts = MemoryInstNum;
  Cand.NumBlocks = ActivePreds.size();
  Cand.NumPHIs = NeededPHIs.size();
  for (auto *C : ActivePreds)
    Cand.Blocks.push_back(C);

  return Cand;
}

unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
  LLVM_DEBUG(dbgs() << "GVNSink: running on basic block ";
             BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
  SmallVector<BasicBlock *, 4> Preds;
  for (auto *B : predecessors(BBEnd)) {
    auto *T = B->getTerminator();
    if (isa<BranchInst>(T) || isa<SwitchInst>(T))
      Preds.push_back(B);
    else
      return 0;
  }
  if (Preds.size() < 2)
    return 0;
  llvm::sort(Preds);

  unsigned NumOrigPreds = Preds.size();
  // We can only sink instructions through unconditional branches.
  for (auto I = Preds.begin(); I != Preds.end();) {
    if ((*I)->getTerminator()->getNumSuccessors() != 1)
      I = Preds.erase(I);
    else
      ++I;
  }

  LockstepReverseIterator LRI(Preds);
  SmallVector<SinkingInstructionCandidate, 4> Candidates;
  unsigned InstNum = 0, MemoryInstNum = 0;
  ModelledPHISet NeededPHIs;
  SmallPtrSet<Value *, 4> PHIContents;
  analyzeInitialPHIs(BBEnd, NeededPHIs, PHIContents);
  unsigned NumOrigPHIs = NeededPHIs.size();

  while (LRI.isValid()) {
    auto Cand = analyzeInstructionForSinking(LRI, InstNum, MemoryInstNum,
                                             NeededPHIs, PHIContents);
    if (!Cand)
      break;
    Cand->calculateCost(NumOrigPHIs, Preds.size());
    Candidates.emplace_back(*Cand);
    --LRI;
  }

  llvm::stable_sort(Candidates, std::greater<SinkingInstructionCandidate>());
  LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C
                                                         : Candidates) dbgs()
                                                    << "  " << C << "\n";);

  // Pick the top candidate, as long as it is positive!
  if (Candidates.empty() || Candidates.front().Cost <= 0)
    return 0;
  auto C = Candidates.front();

  LLVM_DEBUG(dbgs() << " -- Sinking: " << C << "\n");
  BasicBlock *InsertBB = BBEnd;
  if (C.Blocks.size() < NumOrigPreds) {
    LLVM_DEBUG(dbgs() << " -- Splitting edge to ";
               BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
    InsertBB = SplitBlockPredecessors(BBEnd, C.Blocks, ".gvnsink.split");
    if (!InsertBB) {
      LLVM_DEBUG(dbgs() << " -- FAILED to split edge!\n");
      // Edge couldn't be split.
      return 0;
    }
  }

  for (unsigned I = 0; I < C.NumInstructions; ++I)
    sinkLastInstruction(C.Blocks, InsertBB);

  return C.NumInstructions;
}

void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks,
                                  BasicBlock *BBEnd) {
  SmallVector<Instruction *, 4> Insts;
  for (BasicBlock *BB : Blocks)
    Insts.push_back(BB->getTerminator()->getPrevNode());
  Instruction *I0 = Insts.front();

  SmallVector<Value *, 4> NewOperands;
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
    bool NeedPHI = llvm::any_of(Insts, [&I0, O](const Instruction *I) {
      return I->getOperand(O) != I0->getOperand(O);
    });
    if (!NeedPHI) {
      NewOperands.push_back(I0->getOperand(O));
      continue;
    }

    // Create a new PHI in the successor block and populate it.
    auto *Op = I0->getOperand(O);
    assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!");
    auto *PN = PHINode::Create(Op->getType(), Insts.size(),
                               Op->getName() + ".sink", &BBEnd->front());
    for (auto *I : Insts)
      PN->addIncoming(I->getOperand(O), I->getParent());
    NewOperands.push_back(PN);
  }

  // Arbitrarily use I0 as the new "common" instruction; remap its operands
  // and move it to the start of the successor block.
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O)
    I0->getOperandUse(O).set(NewOperands[O]);
  I0->moveBefore(&*BBEnd->getFirstInsertionPt());

  // Update metadata and IR flags.
  for (auto *I : Insts)
    if (I != I0) {
      combineMetadataForCSE(I0, I, true);
      I0->andIRFlags(I);
    }

  for (auto *I : Insts)
    if (I != I0)
      I->replaceAllUsesWith(I0);
  foldPointlessPHINodes(BBEnd);

  // Finally nuke all instructions apart from the common instruction.
  for (auto *I : Insts)
    if (I != I0)
      I->eraseFromParent();

  NumRemoved += Insts.size() - 1;
}

////////////////////////////////////////////////////////////////////////////////
// Pass machinery / boilerplate

class GVNSinkLegacyPass : public FunctionPass {
public:
  static char ID;

  GVNSinkLegacyPass() : FunctionPass(ID) {
    initializeGVNSinkLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    GVNSink G;
    return G.run(F);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

PreservedAnalyses GVNSinkPass::run(Function &F, FunctionAnalysisManager &AM) {
  GVNSink G;
  if (!G.run(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}

char GVNSinkLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNSinkLegacyPass, "gvn-sink",
                      "Early GVN sinking of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(GVNSinkLegacyPass, "gvn-sink",
                    "Early GVN sinking of Expressions", false, false)

FunctionPass *llvm::createGVNSinkPass() { return new GVNSinkLegacyPass(); }