//===-- MachineCSE.cpp - Machine Common Subexpression Elimination Pass ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global common subexpression elimination on machine
// instructions using a scoped hash table based value numbering scheme. It
// must be run while the machine function is still in SSA form.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-cse"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
using namespace llvm;

STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs,      "Number of common subexpression eliminated");
STATISTIC(NumPhysCSEs,
          "Number of physreg referencing common subexpr eliminated");
STATISTIC(NumCrossBBCSEs,
          "Number of cross-MBB physreg referencing CS eliminated");
STATISTIC(NumCommutes,  "Number of copies coalesced after commuting");

namespace {
  class MachineCSE : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    AliasAnalysis *AA;
    MachineDominatorTree *DT;
    MachineRegisterInfo *MRI;
  public:
    static char ID; // Pass identification
    MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
      initializeMachineCSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AliasAnalysis>();
      AU.addPreservedID(MachineLoopInfoID);
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
    }

    virtual void releaseMemory() {
      ScopeMap.clear();
      Exps.clear();
      AllocatableRegs.clear();
      ReservedRegs.clear();
    }

  private:
    const unsigned LookAheadLimit;
    typedef RecyclingAllocator<BumpPtrAllocator,
        ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
    typedef ScopedHashTable<MachineInstr*, unsigned,
        MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
    typedef ScopedHTType::ScopeTy ScopeType;
    DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
    ScopedHTType VNT;
    SmallVector<MachineInstr*, 64> Exps;
    unsigned CurrVN;
    BitVector AllocatableRegs;
    BitVector ReservedRegs;

    bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
    bool isPhysDefTriviallyDead(unsigned Reg,
                                MachineBasicBlock::const_iterator I,
                                MachineBasicBlock::const_iterator E) const;
    bool hasLivePhysRegDefUses(const MachineInstr *MI,
                               const MachineBasicBlock *MBB,
                               SmallSet<unsigned,8> &PhysRefs,
                               SmallVector<unsigned,2> &PhysDefs) const;
    bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                          SmallSet<unsigned,8> &PhysRefs,
                          SmallVector<unsigned,2> &PhysDefs,
                          bool &NonLocal) const;
    bool isCSECandidate(MachineInstr *MI);
    bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
                           MachineInstr *CSMI, MachineInstr *MI);
    void EnterScope(MachineBasicBlock *MBB);
    void ExitScope(MachineBasicBlock *MBB);
    bool ProcessBlock(MachineBasicBlock *MBB);
    void ExitScopeIfDone(MachineDomTreeNode *Node,
                         DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
    bool PerformCSE(MachineDomTreeNode *Node);
  };
} // end anonymous namespace

char MachineCSE::ID = 0;
char &llvm::MachineCSEID = MachineCSE::ID;
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)

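/// PerformTrivialCoalescing - For each virtual register use in MI whose
/// definition is a single-use COPY from another virtual register in the same
/// block, rewrite the use to read the copy's source and erase the copy. This
/// both removes the copy and may let MI match an expression already in the
/// value numbering table.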
bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  bool Changed = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!MRI->hasOneNonDBGUse(Reg))
      // Only coalesce single use copies. This ensures the copy will be
      // deleted.
      continue;
    MachineInstr *DefMI = MRI->getVRegDef(Reg);
    if (DefMI->getParent() != MBB)
      continue;
    if (!DefMI->isCopy())
      continue;
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
      continue;
    if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
      continue;
    if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
      continue;
    DEBUG(dbgs() << "Coalescing: " << *DefMI);
    DEBUG(dbgs() << "***     to: " << *MI);
    MO.setReg(SrcReg);
    MRI->clearKillFlags(SrcReg);
    DefMI->eraseFromParent();
    ++NumCoalesces;
    Changed = true;
  }

  return Changed;
}

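/// isPhysDefTriviallyDead - Scan forward from I (at most LookAheadLimit
/// non-debug instructions) and return true if Reg is redefined or the end of
/// the block is reached before any use of Reg, i.e. the def is trivially dead.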
bool
MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
                                   MachineBasicBlock::const_iterator I,
                                   MachineBasicBlock::const_iterator E) const {
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I->isDebugValue())
      ++I;

    if (I == E)
      // Reached end of block, register is obviously dead.
      return true;

    bool SeenDef = false;
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
        SeenDef = true;
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (!TRI->regsOverlap(MO.getReg(), Reg))
        continue;
      if (MO.isUse())
        // Found a use!
        return false;
      SeenDef = true;
    }
    if (SeenDef)
      // Saw a def of Reg (or an alias) before encountering any use; it's
      // trivially dead.
      return true;

    --LookAheadLeft;
    ++I;
  }
  return false;
}

/// hasLivePhysRegDefUses - Return true if the specified instruction reads or
/// writes physical registers (except for dead defs of physical registers and
/// reads of constant physical registers). The referenced physical registers
/// and their aliases are collected into PhysRefs, and every physical register
/// def is recorded in PhysDefs.
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
                                       const MachineBasicBlock *MBB,
                                       SmallSet<unsigned,8> &PhysRefs,
                                       SmallVector<unsigned,2> &PhysDefs) const{
  MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // If the def is dead, it's ok. But the def may not be marked "dead". That's
    // common since this pass is run before livevariables. We can scan
    // forward a few instructions and check if it is obviously dead.
    if (MO.isDef() &&
        (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
      continue;
    // Reading constant physregs is ok.
    if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        PhysRefs.insert(*AI);
    if (MO.isDef())
      PhysDefs.push_back(Reg);
  }

  return !PhysRefs.empty();
}

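/// PhysRegDefsReach - Return true if the physical register references of CSMI
/// still reach MI, i.e. none of the registers in PhysRefs is clobbered by an
/// instruction between CSMI and MI (scanning at most LookAheadLimit
/// instructions). CSMI may be in MI's block or in its sole predecessor; in the
/// latter case NonLocal is set to true.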
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                  SmallSet<unsigned,8> &PhysRefs,
                                  SmallVector<unsigned,2> &PhysDefs,
                                  bool &NonLocal) const {
  // For now conservatively returns false if the common subexpression is
  // not in the same basic block as the given instruction. The only exception
  // is if the common subexpression is in the sole predecessor block.
  const MachineBasicBlock *MBB = MI->getParent();
  const MachineBasicBlock *CSMBB = CSMI->getParent();

  bool CrossMBB = false;
  if (CSMBB != MBB) {
    if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
      return false;

    for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
      if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i]))
        // Avoid extending live range of physical registers if they are
        // allocatable or reserved.
        return false;
    }
    CrossMBB = true;
  }
  MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
  MachineBasicBlock::const_iterator E = MI;
  MachineBasicBlock::const_iterator EE = CSMBB->end();
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I != EE && I->isDebugValue())
      ++I;

    if (I == EE) {
      assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
      (void)CrossMBB;
      CrossMBB = false;
      NonLocal = true;
      I = MBB->begin();
      EE = MBB->end();
      continue;
    }

    if (I == E)
      return true;

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // RegMasks go on instructions like calls that clobber lots of physregs.
      // Don't attempt to CSE across such an instruction.
      if (MO.isRegMask())
        return false;
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned MOReg = MO.getReg();
      if (TargetRegisterInfo::isVirtualRegister(MOReg))
        continue;
      if (PhysRefs.count(MOReg))
        return false;
    }

    --LookAheadLeft;
    ++I;
  }

  return false;
}

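/// isCSECandidate - Return true if MI is a candidate for common subexpression
/// elimination: not a label, PHI, implicit_def, kill, inline asm, debug value,
/// or copy; it must not store, call, terminate the block, or have unmodeled
/// side effects; and it may load only if the load is invariant.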
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
  if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
      MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
    return false;

  // Ignore copies.
  if (MI->isCopyLike())
    return false;

  // Ignore stuff that we obviously can't move.
  if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
      MI->hasUnmodeledSideEffects())
    return false;

  if (MI->mayLoad()) {
    // Okay, this instruction does a load. As a refinement, we allow the target
    // to decide whether the loaded value is actually a constant. If so, we can
    // CSE it.
    if (!MI->isInvariantLoad(AA))
      // FIXME: we should be able to CSE loads with no other side effects if
      // there are no other instructions which can change memory in this loop.
      // This is a trivial form of alias analysis.
      return false;
  }
  return true;
}

/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
                                   MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristics that work around the lack of live range splitting.

  // If CSReg is used at all uses of Reg, CSE should not increase register
  // pressure of CSReg.
  bool MayIncreasePressure = true;
  if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
      TargetRegisterInfo::isVirtualRegister(Reg)) {
    MayIncreasePressure = false;
    SmallPtrSet<MachineInstr*, 8> CSUses;
    for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      CSUses.insert(Use);
    }
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      if (!CSUses.count(Use)) {
        MayIncreasePressure = true;
        break;
      }
    }
  }
  if (!MayIncreasePressure) return true;

  // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
  // an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computation to be spilled.
  if (MI->isAsCheapAsAMove()) {
    MachineBasicBlock *CSBB = CSMI->getParent();
    MachineBasicBlock *BB = MI->getParent();
    if (CSBB != BB && !CSBB->isSuccessor(BB))
      return false;
  }

  // Heuristics #2: If the expression doesn't use a virtual register and the
  // only uses of the redundant computation are copies, do not CSE.
  bool HasVRegUse = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isUse() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      HasVRegUse = true;
      break;
    }
  }
  if (!HasVRegUse) {
    bool HasNonCopyUse = false;
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      // Ignore copies.
      if (!Use->isCopyLike()) {
        HasNonCopyUse = true;
        break;
      }
    }
    if (!HasNonCopyUse)
      return false;
  }

  // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
  // it unless the defined value is already used in the BB of the new use.
  bool HasPHI = false;
  SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
       E = MRI->use_nodbg_end(); I != E; ++I) {
    MachineInstr *Use = &*I;
    HasPHI |= Use->isPHI();
    CSBBs.insert(Use->getParent());
  }

  if (!HasPHI)
    return true;
  return CSBBs.count(MI->getParent());
}

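/// EnterScope - Open a new scoped hash table scope for MBB. Value numbers
/// inserted while this scope is open are discarded again in ExitScope, once
/// the dominator subtree rooted at MBB has been processed.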
void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
  ScopeType *Scope = new ScopeType(VNT);
  ScopeMap[MBB] = Scope;
}

void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
  DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
  assert(SI != ScopeMap.end());
  delete SI->second;
  ScopeMap.erase(SI);
}

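/// ProcessBlock - Visit every instruction in MBB in order: fold trivial
/// copies, try commuting to expose matches, value-number each candidate, and
/// replace instructions whose expression is already in the table when it is
/// legal and profitable to do so.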
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
  bool Changed = false;

  SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
  SmallVector<unsigned, 2> ImplicitDefsToUpdate;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
    MachineInstr *MI = &*I;
    ++I;

    if (!isCSECandidate(MI))
      continue;

    bool FoundCSE = VNT.count(MI);
    if (!FoundCSE) {
      // Look for trivial copy coalescing opportunities.
      if (PerformTrivialCoalescing(MI, MBB)) {
        Changed = true;

        // After coalescing MI itself may become a copy.
        if (MI->isCopyLike())
          continue;
        FoundCSE = VNT.count(MI);
      }
    }

    // Commute commutable instructions.
    bool Commuted = false;
    if (!FoundCSE && MI->isCommutable()) {
      MachineInstr *NewMI = TII->commuteInstruction(MI);
      if (NewMI) {
        Commuted = true;
        FoundCSE = VNT.count(NewMI);
        if (NewMI != MI) {
          // New instruction. It doesn't need to be kept.
          NewMI->eraseFromParent();
          Changed = true;
        } else if (!FoundCSE)
          // MI was changed but it didn't help, commute it back!
          (void)TII->commuteInstruction(MI);
      }
    }

    // If the instruction defines physical registers and the values *may* be
    // used, then it's not safe to replace it with a common subexpression.
    // It's also not safe if the instruction uses physical registers.
    bool CrossMBBPhysDef = false;
    SmallSet<unsigned, 8> PhysRefs;
    SmallVector<unsigned, 2> PhysDefs;
    if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) {
      FoundCSE = false;

      // ... Unless the CS is local or is in the sole predecessor block, and
      // neither the physical register defs nor the physical register uses
      // are clobbered between the CS and MI.
      unsigned CSVN = VNT.lookup(MI);
      MachineInstr *CSMI = Exps[CSVN];
      if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
        FoundCSE = true;
    }

    if (!FoundCSE) {
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
      continue;
    }

    // Found a common subexpression, eliminate it.
    unsigned CSVN = VNT.lookup(MI);
    MachineInstr *CSMI = Exps[CSVN];
    DEBUG(dbgs() << "Examining: " << *MI);
    DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);

    // Check if it's profitable to perform this CSE.
    bool DoCSE = true;
    unsigned NumDefs = MI->getDesc().getNumDefs() +
                       MI->getDesc().getNumImplicitDefs();

    for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned OldReg = MO.getReg();
      unsigned NewReg = CSMI->getOperand(i).getReg();

      // Go through implicit defs of CSMI and MI; if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead())
        ImplicitDefsToUpdate.push_back(i);
      if (OldReg == NewReg) {
        --NumDefs;
        continue;
      }

      assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
             TargetRegisterInfo::isVirtualRegister(NewReg) &&
             "Do not CSE physical register defs!");

      if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
        DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      // Don't perform CSE if the result of the old instruction cannot exist
      // within the register class of the new instruction.
      const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg);
      if (!MRI->constrainRegClass(NewReg, OldRC)) {
        DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      CSEPairs.push_back(std::make_pair(OldReg, NewReg));
      --NumDefs;
    }

    // Actually perform the elimination.
    if (DoCSE) {
      for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) {
        MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second);
        MRI->clearKillFlags(CSEPairs[i].second);
      }

      // Go through implicit defs of CSMI and MI; if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i)
        CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false);

      if (CrossMBBPhysDef) {
        // Add physical register defs now coming in from a predecessor to MBB
        // live-in list.
        while (!PhysDefs.empty()) {
          unsigned LiveIn = PhysDefs.pop_back_val();
          if (!MBB->isLiveIn(LiveIn))
            MBB->addLiveIn(LiveIn);
        }
        ++NumCrossBBCSEs;
      }

      MI->eraseFromParent();
      ++NumCSEs;
      if (!PhysRefs.empty())
        ++NumPhysCSEs;
      if (Commuted)
        ++NumCommutes;
      Changed = true;
    } else {
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
    }
    CSEPairs.clear();
    ImplicitDefsToUpdate.clear();
  }

  return Changed;
}

/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
/// dominator tree node if it's a leaf or all of its children are done. Walk
/// up the dominator tree to destroy ancestors which are now done.
void
MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
                        DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) {
  if (OpenChildren[Node])
    return;

  // Pop scope.
  ExitScope(Node->getBlock());

  // Now traverse upwards to pop ancestors whose offspring are all done.
  while (MachineDomTreeNode *Parent = Node->getIDom()) {
    unsigned Left = --OpenChildren[Parent];
    if (Left != 0)
      break;
    ExitScope(Parent->getBlock());
    Node = Parent;
  }
}

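/// PerformCSE - Walk the dominator tree rooted at Node in DFS order, entering
/// a scope for each block before processing it and tearing scopes down once
/// all children of a node are done.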
bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
  SmallVector<MachineDomTreeNode*, 32> Scopes;
  SmallVector<MachineDomTreeNode*, 8> WorkList;
  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;

  CurrVN = 0;

  // Perform a DFS walk to determine the order of visit.
  WorkList.push_back(Node);
  do {
    Node = WorkList.pop_back_val();
    Scopes.push_back(Node);
    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();
    OpenChildren[Node] = NumChildren;
    for (unsigned i = 0; i != NumChildren; ++i) {
      MachineDomTreeNode *Child = Children[i];
      WorkList.push_back(Child);
    }
  } while (!WorkList.empty());

  // Now perform CSE.
  bool Changed = false;
  for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
    MachineDomTreeNode *Node = Scopes[i];
    MachineBasicBlock *MBB = Node->getBlock();
    EnterScope(MBB);
    Changed |= ProcessBlock(MBB);
    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
    ExitScopeIfDone(Node, OpenChildren);
  }

  return Changed;
}

bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getTarget().getInstrInfo();
  TRI = MF.getTarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<MachineDominatorTree>();
  AllocatableRegs = TRI->getAllocatableSet(MF);
  ReservedRegs = TRI->getReservedRegs(MF);
  return PerformCSE(DT->getRootNode());
}