1 //===-- llvm/CodeGen/VirtRegRewriter.cpp - Virtual Register Rewriter -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #define DEBUG_TYPE "virtregrewriter"
11 #include "VirtRegRewriter.h"
12 #include "VirtRegMap.h"
13 #include "llvm/Function.h"
14 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Support/CommandLine.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/ErrorHandling.h"
21 #include "llvm/Support/raw_ostream.h"
22 #include "llvm/Target/TargetInstrInfo.h"
23 #include "llvm/Target/TargetLowering.h"
24 #include "llvm/ADT/DepthFirstIterator.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 using namespace llvm;
28 
29 STATISTIC(NumDSE     , "Number of dead stores elided");
30 STATISTIC(NumDSS     , "Number of dead spill slots removed");
31 STATISTIC(NumCommutes, "Number of instructions commuted");
32 STATISTIC(NumDRM     , "Number of re-materializable defs elided");
33 STATISTIC(NumStores  , "Number of stores added");
34 STATISTIC(NumPSpills , "Number of physical register spills");
35 STATISTIC(NumOmitted , "Number of reloads omitted");
36 STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
37 STATISTIC(NumCopified, "Number of available reloads turned into copies");
38 STATISTIC(NumReMats  , "Number of re-materializations");
39 STATISTIC(NumLoads   , "Number of loads added");
40 STATISTIC(NumReused  , "Number of values reused");
41 STATISTIC(NumDCE     , "Number of copies elided");
42 STATISTIC(NumSUnfold , "Number of stores unfolded");
43 STATISTIC(NumModRefUnfold, "Number of modref unfolded");
44 
45 namespace {
46   enum RewriterName { local, trivial };
47 }
48 
49 static cl::opt<RewriterName>
50 RewriterOpt("rewriter",
51             cl::desc("Rewriter to use (default=local)"),
52             cl::Prefix,
53             cl::values(clEnumVal(local,   "local rewriter"),
54                        clEnumVal(trivial, "trivial rewriter"),
55                        clEnumValEnd),
56             cl::init(local));
57 
58 static cl::opt<bool>
59 ScheduleSpills("schedule-spills",
60                cl::desc("Schedule spill code"),
61                cl::init(false));
62 
63 VirtRegRewriter::~VirtRegRewriter() {}
64 
65 /// substitutePhysReg - Replace virtual register in MachineOperand with a
66 /// physical register. Do the right thing with the sub-register index.
67 /// Note that operands may be added, so the MO reference is no longer valid.
68 static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
69                               const TargetRegisterInfo &TRI) {
70   if (MO.getSubReg()) {
71     MO.substPhysReg(Reg, TRI);
72 
73     // Any kill flags apply to the full virtual register, so they also apply to
74     // the full physical register.
75     // We assume that partial defs have already been decorated with a super-reg
76     // <imp-def> operand by LiveIntervals.
77     MachineInstr &MI = *MO.getParent();
78     if (MO.isUse() && !MO.isUndef() &&
79         (MO.isKill() || MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
80       MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
81   } else {
82     MO.setReg(Reg);
83   }
84 }
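// Illustrative note (not from the original source): for a use of a virtual
// register carrying sub-register index sub_16bit that was assigned X86::EAX,
// substPhysReg rewrites the operand to X86::AX and clears the sub-register
// index, while any kill flag is re-added on the full register EAX so later
// passes see the whole physical register die.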
85 
86 namespace {
87 
88 /// This class is intended for use with the new spilling framework only. It
89 /// rewrites vreg def/uses to use the assigned preg, but does not insert any
90 /// spill code.
91 struct TrivialRewriter : public VirtRegRewriter {
92 
93   bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
94                             LiveIntervals* LIs) {
95     DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
96     DEBUG(dbgs() << "********** Function: "
97           << MF.getFunction()->getName() << '\n');
98     DEBUG(dbgs() << "**** Machine Instrs"
99           << "(NOTE! Does not include spills and reloads!) ****\n");
100     DEBUG(MF.dump());
101 
102     MachineRegisterInfo *mri = &MF.getRegInfo();
103     const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();
104 
105     bool changed = false;
106 
107     for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
108          liItr != liEnd; ++liItr) {
109 
110       const LiveInterval *li = liItr->second;
111       unsigned reg = li->reg;
112 
113       if (TargetRegisterInfo::isPhysicalRegister(reg)) {
114         if (!li->empty())
115           mri->setPhysRegUsed(reg);
116       }
117       else {
118         if (!VRM.hasPhys(reg))
119           continue;
120         unsigned pReg = VRM.getPhys(reg);
121         mri->setPhysRegUsed(pReg);
122         // Copy the register use-list before traversing it.
123         SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
124         for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
125                E = mri->reg_end(); I != E; ++I)
126           reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
127         for (unsigned N=0; N != reglist.size(); ++N)
128           substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
129                             pReg, *tri);
130         changed |= !reglist.empty();
131       }
132     }
133 
134     DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
135     DEBUG(MF.dump());
136 
137     return changed;
138   }
139 
140 };
141 
142 }
143 
144 // ************************************************************************ //
145 
146 namespace {
147 
148 /// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
149 /// from top down, keep track of which spill slots or remat'ed values are
150 /// available in each register.
151 ///
152 /// Note that not all physregs are created equal here.  In particular, some
153 /// physregs are reloads that we are allowed to clobber or ignore at any time.
154 /// Other physregs are values that the register allocated program is using
155 /// that we cannot CHANGE, but we can read if we like.  We keep track of this
156 /// on a per-stack-slot / remat id basis as the low bit in the value of the
157 /// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
158 /// this bit and addAvailable sets it when a value is recorded.
159 class AvailableSpills {
160   const TargetRegisterInfo *TRI;
161   const TargetInstrInfo *TII;
162 
163   // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
164   // or remat'ed virtual register values that are still available, due to
165   // being loaded or stored to, but not invalidated yet.
166   std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
167 
168   // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
169   // indicating which stack slot values are currently held by a physreg.  This
170   // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
171   // physreg is modified.
172   std::multimap<unsigned, int> PhysRegsAvailable;
173 
174   void disallowClobberPhysRegOnly(unsigned PhysReg);
175 
176   void ClobberPhysRegOnly(unsigned PhysReg);
177 public:
178   AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
179     : TRI(tri), TII(tii) {
180   }
181 
182   /// clear - Reset the state.
183   void clear() {
184     SpillSlotsOrReMatsAvailable.clear();
185     PhysRegsAvailable.clear();
186   }
187 
188   const TargetRegisterInfo *getRegInfo() const { return TRI; }
189 
190   /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
191   /// available in a physical register, return that PhysReg, otherwise
192   /// return 0.
193   unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
194     std::map<int, unsigned>::const_iterator I =
195       SpillSlotsOrReMatsAvailable.find(Slot);
196     if (I != SpillSlotsOrReMatsAvailable.end()) {
197       return I->second >> 1;  // Remove the CanClobber bit.
198     }
199     return 0;
200   }
201 
202   /// addAvailable - Mark that the specified stack slot / remat is available
203   /// in the specified physreg.  If CanClobber is true, the physreg can be
204   /// modified at any time without changing the semantics of the program.
205   void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
206     // If this stack slot is thought to be available in some other physreg,
207     // remove its record.
208     ModifyStackSlotOrReMat(SlotOrReMat);
209 
210     PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
211     SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) |
212                                               (unsigned)CanClobber;
213 
214     if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
215       DEBUG(dbgs() << "Remembering RM#"
216                    << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
217     else
218       DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
219     DEBUG(dbgs() << " in physreg " << TRI->getName(Reg)
220           << (CanClobber ? " canclobber" : "") << "\n");
221   }
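  // Worked example (illustrative, not in the original source): after
  // addAvailable(3, R, true), SpillSlotsOrReMatsAvailable[3] == (R << 1) | 1,
  // so getSpillSlotOrReMatPhysReg(3) recovers R with a right shift by one and
  // canClobberPhysRegForSS(3) reads the low CanClobber bit.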
222 
223   /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
224   /// the value of the specified stackslot register if it desires. The
225   /// specified stack slot must be available in a physreg for this query to
226   /// make sense.
227   bool canClobberPhysRegForSS(int SlotOrReMat) const {
228     assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
229            "Value not available!");
230     return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
231   }
232 
233   /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
234   /// physical register where values for some stack slot(s) might be
235   /// available.
236   bool canClobberPhysReg(unsigned PhysReg) const {
237     std::multimap<unsigned, int>::const_iterator I =
238       PhysRegsAvailable.lower_bound(PhysReg);
239     while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
240       int SlotOrReMat = I->second;
241       I++;
242       if (!canClobberPhysRegForSS(SlotOrReMat))
243         return false;
244     }
245     return true;
246   }
247 
248   /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
249   /// stackslot register. The register is still available but is no longer
250   /// allowed to be modified.
251   void disallowClobberPhysReg(unsigned PhysReg);
252 
253   /// ClobberPhysReg - This is called when the specified physreg changes
254   /// value.  We use this to invalidate any info about stuff that lives in
255   /// it and any of its aliases.
256   void ClobberPhysReg(unsigned PhysReg);
257 
258   /// ModifyStackSlotOrReMat - This method is called when the value in a stack
259   /// slot changes.  This removes information about which register the
260   /// previous value for this slot lives in (as the previous value is dead
261   /// now).
262   void ModifyStackSlotOrReMat(int SlotOrReMat);
263 
264   /// ClobberSharingStackSlots - When a register mapped to a stack slot changes,
265   /// other stack slots sharing the same register are no longer valid.
266   void ClobberSharingStackSlots(int StackSlot);
267 
268   /// AddAvailableRegsToLiveIn - Availability information is carried forward
269   /// into the specified MBB. Add available physical registers as potential
270   /// live-in's. If they are reused in the MBB, they will be added to the
271   /// live-in set for the register scavenger and post-allocation scheduler.
272   void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
273                                 std::vector<MachineOperand*> &KillOps);
274 };
275 
276 }
277 
278 // ************************************************************************ //
279 
280 // Given a location where a reload of a spilled register or a remat of
281 // a constant is to be inserted, attempt to find a safe location to
282 // insert the load at an earlier point in the basic-block, to hide
283 // latency of the load and to avoid address-generation interlock
284 // issues.
285 static MachineBasicBlock::iterator
286 ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
287                  MachineBasicBlock::iterator const Begin,
288                  unsigned PhysReg,
289                  const TargetRegisterInfo *TRI,
290                  bool DoReMat,
291                  int SSorRMId,
292                  const TargetInstrInfo *TII,
293                  const MachineFunction &MF)
294 {
295   if (!ScheduleSpills)
296     return InsertLoc;
297 
298   // Spill backscheduling is of primary interest to addresses, so
299   // don't do anything if the register isn't in the register class
300   // used for pointers.
301 
302   const TargetLowering *TL = MF.getTarget().getTargetLowering();
303 
304   if (!TL->isTypeLegal(TL->getPointerTy()))
305     // Believe it or not, this is true on 16-bit targets like PIC16.
306     return InsertLoc;
307 
308   const TargetRegisterClass *ptrRegClass =
309     TL->getRegClassFor(TL->getPointerTy());
310   if (!ptrRegClass->contains(PhysReg))
311     return InsertLoc;
312 
313   // Scan upwards through the preceding instructions. If an instruction doesn't
314   // reference the stack slot or the register we're loading, we can
315   // backschedule the reload up past it.
316   MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
317   while (NewInsertLoc != Begin) {
318     MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
319     for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
320       MachineOperand &Op = Prev->getOperand(i);
321       if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
322         goto stop;
323     }
324     if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
325         Prev->findRegisterDefOperand(PhysReg))
326       goto stop;
327     for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
328       if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
329           Prev->findRegisterDefOperand(*Alias))
330         goto stop;
331     NewInsertLoc = Prev;
332   }
333 stop:;
334 
335   // If we made it to the beginning of the block, turn around and move back
336   // down just past any existing reloads. They're likely to be reloads/remats
337   // for instructions earlier than what our current reload/remat is for, so
338   // they should be scheduled earlier.
339   if (NewInsertLoc == Begin) {
340     int FrameIdx;
341     while (InsertLoc != NewInsertLoc &&
342            (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
343             TII->isTriviallyReMaterializable(NewInsertLoc)))
344       ++NewInsertLoc;
345   }
346 
347   return NewInsertLoc;
348 }
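// Illustrative note (not from the original source): with -schedule-spills
// enabled, a reload of a pointer-class register can be hoisted above earlier
// instructions that touch neither its stack slot nor the register itself,
// hiding the load latency before the reloaded address is consumed.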
349 
350 namespace {
351 
352 // ReusedOp - For each reused operand, we keep track of a bit of information,
353 // in case we need to rollback upon processing a new operand.  See comments
354 // below.
355 struct ReusedOp {
356   // The MachineInstr operand that reused an available value.
357   unsigned Operand;
358 
359   // StackSlotOrReMat - The spill slot or remat id of the value being reused.
360   unsigned StackSlotOrReMat;
361 
362   // PhysRegReused - The physical register the value was available in.
363   unsigned PhysRegReused;
364 
365   // AssignedPhysReg - The physreg that was assigned for use by the reload.
366   unsigned AssignedPhysReg;
367 
368   // VirtReg - The virtual register itself.
369   unsigned VirtReg;
370 
371   ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
372            unsigned vreg)
373     : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
374       AssignedPhysReg(apr), VirtReg(vreg) {}
375 };
376 
377 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
378 /// is reused instead of reloaded.
379 class ReuseInfo {
380   MachineInstr &MI;
381   std::vector<ReusedOp> Reuses;
382   BitVector PhysRegsClobbered;
383 public:
384   ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
385     PhysRegsClobbered.resize(tri->getNumRegs());
386   }
387 
388   bool hasReuses() const {
389     return !Reuses.empty();
390   }
391 
392   /// addReuse - If we choose to reuse a virtual register that is already
393   /// available instead of reloading it, remember that we did so.
394   void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
395                 unsigned PhysRegReused, unsigned AssignedPhysReg,
396                 unsigned VirtReg) {
397     // If the reload is to the assigned register anyway, no undo will be
398     // required.
399     if (PhysRegReused == AssignedPhysReg) return;
400 
401     // Otherwise, remember this.
402     Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
403                               AssignedPhysReg, VirtReg));
404   }
405 
406   void markClobbered(unsigned PhysReg) {
407     PhysRegsClobbered.set(PhysReg);
408   }
409 
410   bool isClobbered(unsigned PhysReg) const {
411     return PhysRegsClobbered.test(PhysReg);
412   }
413 
414   /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
415   /// is some other operand that is using the specified register, either pick
416   /// a new register to use, or evict the previous reload and use this reg.
417   unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
418                            MachineFunction &MF, MachineInstr *MI,
419                            AvailableSpills &Spills,
420                            std::vector<MachineInstr*> &MaybeDeadStores,
421                            SmallSet<unsigned, 8> &Rejected,
422                            BitVector &RegKills,
423                            std::vector<MachineOperand*> &KillOps,
424                            VirtRegMap &VRM);
425 
426   /// GetRegForReload - Helper for the above GetRegForReload(). Add a
427   /// 'Rejected' set to remember which registers have been considered and
428   /// rejected for the reload. This avoids infinite looping in cases like
429   /// this:
430   /// t1 := op t2, t3
431   /// t2 <- assigned r0 for use by the reload but ended up reusing r1
432   /// t3 <- assigned r1 for use by the reload but ended up reusing r0
433   /// t1 <- desires r1
434   ///       sees r1 is taken by t2, tries t2's reload register r0
435   ///       sees r0 is taken by t3, tries t3's reload register r1
436   ///       sees r1 is taken by t2, tries t2's reload register r0 ...
437   unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
438                            AvailableSpills &Spills,
439                            std::vector<MachineInstr*> &MaybeDeadStores,
440                            BitVector &RegKills,
441                            std::vector<MachineOperand*> &KillOps,
442                            VirtRegMap &VRM) {
443     SmallSet<unsigned, 8> Rejected;
444     MachineFunction &MF = *MI->getParent()->getParent();
445     const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
446     return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
447                            Rejected, RegKills, KillOps, VRM);
448   }
449 };
450 
451 }
452 
453 // ****************** //
454 // Utility Functions  //
455 // ****************** //
456 
457 /// findSinglePredSuccessor - Return via reference a vector of machine basic
458 /// blocks each of which is a successor of the specified BB and has no other
459 /// predecessor.
460 static void findSinglePredSuccessor(MachineBasicBlock *MBB,
461                                    SmallVectorImpl<MachineBasicBlock *> &Succs){
462   for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
463          SE = MBB->succ_end(); SI != SE; ++SI) {
464     MachineBasicBlock *SuccMBB = *SI;
465     if (SuccMBB->pred_size() == 1)
466       Succs.push_back(SuccMBB);
467   }
468 }
469 
470 /// ResurrectConfirmedKill - Helper for ResurrectKill. This register is killed
471 /// but not re-defined and it's being reused. Remove the kill flag for the
472 /// register and unset the kill's marker and last kill operand.
473 static void ResurrectConfirmedKill(unsigned Reg, const TargetRegisterInfo* TRI,
474                                    BitVector &RegKills,
475                                    std::vector<MachineOperand*> &KillOps) {
476   DEBUG(dbgs() << "Resurrect " << TRI->getName(Reg) << "\n");
477 
478   MachineOperand *KillOp = KillOps[Reg];
479   KillOp->setIsKill(false);
480   // KillOps[Reg] might be a def of a super-register.
481   unsigned KReg = KillOp->getReg();
482   if (!RegKills[KReg])
483     return;
484 
485   assert(KillOps[KReg]->getParent() == KillOp->getParent() &&
486          "invalid superreg kill flags");
487   KillOps[KReg] = NULL;
488   RegKills.reset(KReg);
489 
490   // If it's a def of a super-register, its other sub-registers are no
491   // longer killed either.
492   for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
493     DEBUG(dbgs() << "  Resurrect subreg " << TRI->getName(*SR) << "\n");
494 
495     assert(KillOps[*SR]->getParent() == KillOp->getParent() &&
496            "invalid subreg kill flags");
497     KillOps[*SR] = NULL;
498     RegKills.reset(*SR);
499   }
500 }
501 
502 /// ResurrectKill - Invalidate kill info associated with a previous MI. An
503 /// optimization may have decided that it's safe to reuse a previously killed
504 /// register. If we fail to erase the invalid kill flags, then the register
505 /// scavenger may later clobber the register used by this MI. Note that this
506 /// must be done even if this MI is being deleted! Consider:
507 ///
508 /// USE $r1 (vreg1) <kill>
509 /// ...
510 /// $r1(vreg3) = COPY $r1 (vreg2)
511 ///
512 /// RegAlloc has smartly assigned all three vregs to the same physreg. Initially
513 /// vreg1's only use is a kill. The rewriter doesn't know it should be live
514 /// until it rewrites vreg2. At that point it sees that the copy is dead and
515 /// deletes it. However, deleting the copy implicitly forwards liveness of $r1
516 /// (it's copy coalescing). We must resurrect $r1 by removing the kill flag at
517 /// vreg1 before deleting the copy.
518 static void ResurrectKill(MachineInstr &MI, unsigned Reg,
519                           const TargetRegisterInfo* TRI, BitVector &RegKills,
520                           std::vector<MachineOperand*> &KillOps) {
521   if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
522     ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
523     return;
524   }
525   // No previous kill for this reg. Check for subreg kills as well.
526   // d4 =
527   // store d4, fi#0
528   // ...
529   //    = s8<kill>
530   // ...
531   //    = d4  <avoiding reload>
532   for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
533     unsigned SReg = *SR;
534     if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI)
535       ResurrectConfirmedKill(SReg, TRI, RegKills, KillOps);
536   }
537 }
538 
539 /// InvalidateKills - MI is going to be deleted. If any of its operands are
540 /// marked kill, then invalidate the information.
541 static void InvalidateKills(MachineInstr &MI,
542                             const TargetRegisterInfo* TRI,
543                             BitVector &RegKills,
544                             std::vector<MachineOperand*> &KillOps,
545                             SmallVector<unsigned, 2> *KillRegs = NULL) {
546   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
547     MachineOperand &MO = MI.getOperand(i);
548     if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
549       continue;
550     unsigned Reg = MO.getReg();
551     if (TargetRegisterInfo::isVirtualRegister(Reg))
552       continue;
553     if (KillRegs)
554       KillRegs->push_back(Reg);
555     assert(Reg < KillOps.size());
556     if (KillOps[Reg] == &MO) {
557       // This operand was the kill, now no longer.
558       KillOps[Reg] = NULL;
559       RegKills.reset(Reg);
560       for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
561         if (RegKills[*SR]) {
562           assert(KillOps[*SR] == &MO && "bad subreg kill flags");
563           KillOps[*SR] = NULL;
564           RegKills.reset(*SR);
565         }
566       }
567     }
568     else {
569       // This operand may have reused a previously killed reg. Keep it live in
570       // case it continues to be used after erasing this instruction.
571       ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
572     }
573   }
574 }
575 
576 /// InvalidateRegDef - If the def operand of the specified def MI is now dead
577 /// (since its spill instruction is removed), mark it isDead. Also checks if
578 /// the def MI has other definition operands that are not dead; that result is
579 /// returned by reference in HasLiveDef.
580 static bool InvalidateRegDef(MachineBasicBlock::iterator I,
581                              MachineInstr &NewDef, unsigned Reg,
582                              bool &HasLiveDef,
583                              const TargetRegisterInfo *TRI) {
584   // Due to remat, it's possible this reg isn't being reused. That is,
585   // the def of this reg (by prev MI) is now dead.
586   MachineInstr *DefMI = I;
587   MachineOperand *DefOp = NULL;
588   for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
589     MachineOperand &MO = DefMI->getOperand(i);
590     if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
591       continue;
592     if (MO.getReg() == Reg)
593       DefOp = &MO;
594     else if (!MO.isDead())
595       HasLiveDef = true;
596   }
597   if (!DefOp)
598     return false;
599 
600   bool FoundUse = false, Done = false;
601   MachineBasicBlock::iterator E = &NewDef;
602   ++I; ++E;
603   for (; !Done && I != E; ++I) {
604     MachineInstr *NMI = I;
605     for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
606       MachineOperand &MO = NMI->getOperand(j);
607       if (!MO.isReg() || MO.getReg() == 0 ||
608           (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
609         continue;
610       if (MO.isUse())
611         FoundUse = true;
612       Done = true; // Stop after scanning all the operands of this MI.
613     }
614   }
615   if (!FoundUse) {
616     // Def is dead!
617     DefOp->setIsDead();
618     return true;
619   }
620   return false;
621 }
622 
623 /// UpdateKills - Track and update kill info. If a MI reads a register that is
624 /// marked kill, then it must be due to register reuse. Transfer the kill info
625 /// over.
626 static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
627                         BitVector &RegKills,
628                         std::vector<MachineOperand*> &KillOps) {
629   // These do not affect kill info at all.
630   if (MI.isDebugValue())
631     return;
632   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
633     MachineOperand &MO = MI.getOperand(i);
634     if (!MO.isReg() || !MO.isUse() || MO.isUndef())
635       continue;
636     unsigned Reg = MO.getReg();
637     if (Reg == 0)
638       continue;
639 
640     // This operand may have reused a previously killed reg. Keep it live.
641     ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
642 
643     if (MO.isKill()) {
644       RegKills.set(Reg);
645       KillOps[Reg] = &MO;
646       for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
647         RegKills.set(*SR);
648         KillOps[*SR] = &MO;
649       }
650     }
651   }
652 
653   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
654     const MachineOperand &MO = MI.getOperand(i);
655     if (!MO.isReg() || !MO.getReg() || !MO.isDef())
656       continue;
657     unsigned Reg = MO.getReg();
658     RegKills.reset(Reg);
659     KillOps[Reg] = NULL;
660     // It also defines (or partially defines) aliases.
661     for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
662       RegKills.reset(*SR);
663       KillOps[*SR] = NULL;
664     }
665     for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
666       RegKills.reset(*SR);
667       KillOps[*SR] = NULL;
668     }
669   }
670 }
671 
672 /// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
673 ///
674 static void ReMaterialize(MachineBasicBlock &MBB,
675                           MachineBasicBlock::iterator &MII,
676                           unsigned DestReg, unsigned Reg,
677                           const TargetInstrInfo *TII,
678                           const TargetRegisterInfo *TRI,
679                           VirtRegMap &VRM) {
680   MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
681 #ifndef NDEBUG
682   const MCInstrDesc &MCID = ReMatDefMI->getDesc();
683   assert(MCID.getNumDefs() == 1 &&
684          "Don't know how to remat instructions that define > 1 values!");
685 #endif
686   TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
687   MachineInstr *NewMI = prior(MII);
688   for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
689     MachineOperand &MO = NewMI->getOperand(i);
690     if (!MO.isReg() || MO.getReg() == 0)
691       continue;
692     unsigned VirtReg = MO.getReg();
693     if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
694       continue;
695     assert(MO.isUse());
696     unsigned Phys = VRM.getPhys(VirtReg);
697     assert(Phys && "Virtual register is not assigned a register?");
698     substitutePhysReg(MO, Phys, *TRI);
699   }
700   ++NumReMats;
701 }
702 
703 /// findSuperReg - Find the register of the given register class whose SubIdx
704 /// sub-register is SubReg.
705 static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
706                              unsigned SubIdx, const TargetRegisterInfo *TRI) {
707   for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
708        I != E; ++I) {
709     unsigned Reg = *I;
710     if (TRI->getSubReg(Reg, SubIdx) == SubReg)
711       return Reg;
712   }
713   return 0;
714 }
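// Illustrative example (an assumption about target register naming, not from
// the original source): on x86-64, findSuperReg(GR64 class, X86::EAX,
// X86::sub_32bit, TRI) would return X86::RAX, since RAX's sub_32bit
// sub-register is EAX.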
715 
716 // ******************************** //
717 // Available Spills Implementation  //
718 // ******************************** //
719 
720 /// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
721 /// stackslot register. The register is still available but is no longer
722 /// allowed to be modified.
723 void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
724   std::multimap<unsigned, int>::iterator I =
725     PhysRegsAvailable.lower_bound(PhysReg);
726   while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
727     int SlotOrReMat = I->second;
728     I++;
729     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
730            "Bidirectional map mismatch!");
731     SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
732     DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
733          << " copied, it is available for use but can no longer be modified\n");
734   }
735 }
736 
737 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
738 /// stackslot register and its aliases. The register and its aliases may
739 /// still be available but are no longer allowed to be modified.
740 void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
741   for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
742     disallowClobberPhysRegOnly(*AS);
743   disallowClobberPhysRegOnly(PhysReg);
744 }
745 
746 /// ClobberPhysRegOnly - This is called when the specified physreg changes
747 /// value.  We use this to invalidate any info about stuff we think lives in it.
748 void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
749   std::multimap<unsigned, int>::iterator I =
750     PhysRegsAvailable.lower_bound(PhysReg);
751   while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
752     int SlotOrReMat = I->second;
753     PhysRegsAvailable.erase(I++);
754     assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
755            "Bidirectional map mismatch!");
756     SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
757     DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
758           << " clobbered, invalidating ");
759     if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
760       DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
761     else
762       DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
763   }
764 }
765 
766 /// ClobberPhysReg - This is called when the specified physreg changes
767 /// value.  We use this to invalidate any info about stuff we think lives in
768 /// it and any of its aliases.
769 void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
770   for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
771     ClobberPhysRegOnly(*AS);
772   ClobberPhysRegOnly(PhysReg);
773 }
774 
775 /// AddAvailableRegsToLiveIn - Availability information is carried forward
776 /// into the specified MBB. Add available physical registers as potential
777 /// live-in's. If they are reused in the MBB, they will be added to the
778 /// live-in set for the register scavenger and post-allocation scheduler.
779 void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
780                                         BitVector &RegKills,
781                                         std::vector<MachineOperand*> &KillOps) {
782   std::set<unsigned> NotAvailable;
783   for (std::multimap<unsigned, int>::iterator
784          I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
785        I != E; ++I) {
786     unsigned Reg = I->first;
787     const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
788     // FIXME: A temporary workaround. We can't reuse available value if it's
789     // not safe to move the def of the virtual register's class. e.g.
790     // X86::RFP* register classes. Do not add it as a live-in.
791     if (!TII->isSafeToMoveRegClassDefs(RC))
792       // This is no longer available.
793       NotAvailable.insert(Reg);
794     else {
795       MBB.addLiveIn(Reg);
796       if (RegKills[Reg])
797         ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
798     }
799 
800     // Skip over the same register.
801     std::multimap<unsigned, int>::iterator NI = llvm::next(I);
802     while (NI != E && NI->first == Reg) {
803       ++I;
804       ++NI;
805     }
806   }
807 
808   for (std::set<unsigned>::iterator I = NotAvailable.begin(),
809          E = NotAvailable.end(); I != E; ++I) {
810     ClobberPhysReg(*I);
811     for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
812        *SubRegs; ++SubRegs)
813       ClobberPhysReg(*SubRegs);
814   }
815 }
816 
817 /// ModifyStackSlotOrReMat - This method is called when the value in a stack
818 /// slot changes.  This removes information about which register the previous
819 /// value for this slot lives in (as the previous value is dead now).
820 void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
821   std::map<int, unsigned>::iterator It =
822     SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
823   if (It == SpillSlotsOrReMatsAvailable.end()) return;
824   unsigned Reg = It->second >> 1;
825   SpillSlotsOrReMatsAvailable.erase(It);
826 
827   // This register may hold the value of multiple stack slots, only remove this
828   // stack slot from the set of values the register contains.
829   std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
830   for (; ; ++I) {
831     assert(I != PhysRegsAvailable.end() && I->first == Reg &&
832            "Map inverse broken!");
833     if (I->second == SlotOrReMat) break;
834   }
835   PhysRegsAvailable.erase(I);
836 }
837 
838 void AvailableSpills::ClobberSharingStackSlots(int StackSlot) {
839   std::map<int, unsigned>::iterator It =
840     SpillSlotsOrReMatsAvailable.find(StackSlot);
841   if (It == SpillSlotsOrReMatsAvailable.end()) return;
842   unsigned Reg = It->second >> 1;
843 
844   // Erase entries in PhysRegsAvailable for other stack slots.
845   std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
846   while (I != PhysRegsAvailable.end() && I->first == Reg) {
847     std::multimap<unsigned, int>::iterator NextI = llvm::next(I);
848     if (I->second != StackSlot) {
849       DEBUG(dbgs() << "Clobbered sharing SS#" << I->second << " in "
850                    << PrintReg(Reg, TRI) << '\n');
851       SpillSlotsOrReMatsAvailable.erase(I->second);
852       PhysRegsAvailable.erase(I);
853     }
854     I = NextI;
855   }
856 }
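// Illustrative note (not from the original source): if SS#1 and SS#2 are both
// recorded as available in the same physreg and the value for SS#1 is then
// rewritten in place, ClobberSharingStackSlots(SS#1) drops the stale SS#2
// entry while keeping SS#1's mapping intact.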
857 
858 // ************************** //
859 // Reuse Info Implementation  //
860 // ************************** //
861 
862 /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
863 /// is some other operand that is using the specified register, either pick
864 /// a new register to use, or evict the previous reload and use this reg.
865 unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
866                          unsigned PhysReg,
867                          MachineFunction &MF,
868                          MachineInstr *MI, AvailableSpills &Spills,
869                          std::vector<MachineInstr*> &MaybeDeadStores,
870                          SmallSet<unsigned, 8> &Rejected,
871                          BitVector &RegKills,
872                          std::vector<MachineOperand*> &KillOps,
873                          VirtRegMap &VRM) {
874   const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
875   const TargetRegisterInfo *TRI = Spills.getRegInfo();
876 
877   if (Reuses.empty()) return PhysReg;  // This is most often empty.
878 
879   for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
880     ReusedOp &Op = Reuses[ro];
881     // If we find some other reuse that was supposed to use this register
882     // exactly for its reload, we can change this reload to use ITS reload
883     // register. That is, unless its reload register has already been
884     // considered and subsequently rejected because it has also been reused
885     // by another operand.
886     if (Op.PhysRegReused == PhysReg &&
887         Rejected.count(Op.AssignedPhysReg) == 0 &&
888         RC->contains(Op.AssignedPhysReg)) {
889       // Yup, use the reload register that we didn't use before.
890       unsigned NewReg = Op.AssignedPhysReg;
891       Rejected.insert(PhysReg);
892       return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
893                              Rejected, RegKills, KillOps, VRM);
894     } else {
895       // Otherwise, we might also have a problem if a previously reused
896       // value aliases the new register. If so, codegen the previous reload
897       // and use this one.
898       unsigned PRRU = Op.PhysRegReused;
899       if (TRI->regsOverlap(PRRU, PhysReg)) {
900         // Okay, we found out that an alias of a reused register
901         // was used.  This isn't good because it means we have
902         // to undo a previous reuse.
903         MachineBasicBlock *MBB = MI->getParent();
904         const TargetRegisterClass *AliasRC =
905           MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
906 
907         // Copy Op out of the vector and remove it, we're going to insert an
908         // explicit load for it.
909         ReusedOp NewOp = Op;
910         Reuses.erase(Reuses.begin()+ro);
911 
912         // MI may be using only a sub-register of PhysRegUsed.
913         unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
914         unsigned SubIdx = 0;
915         assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
916                "A reuse cannot be a virtual register");
917         if (PRRU != RealPhysRegUsed) {
918           // What was the sub-register index?
919           SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
920           assert(SubIdx &&
921                  "Operand physreg is not a sub-register of PhysRegUsed");
922         }
923 
924         // Ok, we're going to try to reload the assigned physreg into the
925         // slot that we were supposed to in the first place.  However, that
926         // register could hold a reuse.  Check to see if it conflicts or
927         // would prefer us to use a different register.
928         unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
929                                               MF, MI, Spills, MaybeDeadStores,
930                                               Rejected, RegKills, KillOps, VRM);
931 
932         bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
933         int SSorRMId = DoReMat
934           ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;
935 
936         // Back-schedule reloads and remats.
937         MachineBasicBlock::iterator InsertLoc =
938           ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
939                            DoReMat, SSorRMId, TII, MF);
940 
941         if (DoReMat) {
942           ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
943                         TRI, VRM);
944         } else {
945           TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
946                                     NewOp.StackSlotOrReMat, AliasRC, TRI);
947           MachineInstr *LoadMI = prior(InsertLoc);
948           VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
949           // Any stores to this stack slot are not dead anymore.
950           MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
951           ++NumLoads;
952         }
953         Spills.ClobberPhysReg(NewPhysReg);
954         Spills.ClobberPhysReg(NewOp.PhysRegReused);
955 
956         unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) :NewPhysReg;
957         MI->getOperand(NewOp.Operand).setReg(RReg);
958         MI->getOperand(NewOp.Operand).setSubReg(0);
959 
960         Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
961         UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
962         DEBUG(dbgs() << '\t' << *prior(InsertLoc));
963 
964         DEBUG(dbgs() << "Reuse undone!\n");
965         --NumReused;
966 
967         // Finally, PhysReg is now available, go ahead and use it.
968         return PhysReg;
969       }
970     }
971   }
972   return PhysReg;
973 }
974 
975 // ************************************************************************ //
976 
977 /// FoldsStackSlotModRef - Return true if the specified MI folds the specified
978 /// stack slot mod/ref. It also checks if it's possible to unfold the
979 /// instruction by having it define a specified physical register instead.
980 static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
981                                  const TargetInstrInfo *TII,
982                                  const TargetRegisterInfo *TRI,
983                                  VirtRegMap &VRM) {
984   if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
985     return false;
986 
987   bool Found = false;
988   VirtRegMap::MI2VirtMapTy::const_iterator I, End;
989   for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
990     unsigned VirtReg = I->second.first;
991     VirtRegMap::ModRef MR = I->second.second;
992     if (MR & VirtRegMap::isModRef)
993       if (VRM.getStackSlot(VirtReg) == SS) {
994         Found= TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
995         break;
996       }
997   }
998   if (!Found)
999     return false;
1000 
1001   // Does the instruction use a register that overlaps the scratch register?
1002   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1003     MachineOperand &MO = MI.getOperand(i);
1004     if (!MO.isReg() || MO.getReg() == 0)
1005       continue;
1006     unsigned Reg = MO.getReg();
1007     if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1008       if (!VRM.hasPhys(Reg))
1009         continue;
1010       Reg = VRM.getPhys(Reg);
1011     }
1012     if (TRI->regsOverlap(PhysReg, Reg))
1013       return false;
1014   }
1015   return true;
1016 }
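// Illustrative note (not from the original source): an instruction such as
// 'addq %rax, -184(%rbp)' both reads and writes its stack slot; if the target
// reports an unfolded opcode for it and none of its operands overlap the
// proposed scratch register, FoldsStackSlotModRef returns true.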
1017 
1018 /// FindFreeRegister - Find a free register of a given register class by looking
1019 /// at (at most) the last two machine instructions.
1020 static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
1021                                  MachineBasicBlock &MBB,
1022                                  const TargetRegisterClass *RC,
1023                                  const TargetRegisterInfo *TRI,
1024                                  BitVector &AllocatableRegs) {
1025   BitVector Defs(TRI->getNumRegs());
1026   BitVector Uses(TRI->getNumRegs());
1027   SmallVector<unsigned, 4> LocalUses;
1028   SmallVector<unsigned, 4> Kills;
1029 
1030   // Take a look at 2 instructions at most.
1031   unsigned Count = 0;
1032   while (Count < 2) {
1033     if (MII == MBB.begin())
1034       break;
1035     MachineInstr *PrevMI = prior(MII);
1036     MII = PrevMI;
1037 
1038     if (PrevMI->isDebugValue())
1039       continue; // Skip over dbg_value instructions.
1040     ++Count;
1041 
1042     for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
1043       MachineOperand &MO = PrevMI->getOperand(i);
1044       if (!MO.isReg() || MO.getReg() == 0)
1045         continue;
1046       unsigned Reg = MO.getReg();
1047       if (MO.isDef()) {
1048         Defs.set(Reg);
1049         for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
1050           Defs.set(*AS);
1051       } else  {
1052         LocalUses.push_back(Reg);
1053         if (MO.isKill() && AllocatableRegs[Reg])
1054           Kills.push_back(Reg);
1055       }
1056     }
1057 
1058     for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
1059       unsigned Kill = Kills[i];
1060       if (!Defs[Kill] && !Uses[Kill] &&
1061           RC->contains(Kill))
1062         return Kill;
1063     }
1064     for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
1065       unsigned Reg = LocalUses[i];
1066       Uses.set(Reg);
1067       for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
1068         Uses.set(*AS);
1069     }
1070   }
1071 
1072   return 0;
1073 }
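// Illustrative note (not from the original source): in the OptimizeByUnfold2
// example below, the preceding 'xorq %r12<kill>, %r13' kills %r12 without
// redefining or reusing it afterwards, so FindFreeRegister can hand %r12 back
// as the scratch register for the unfolded sequence.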
1074 
1075 static
1076 void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
1077                          const TargetRegisterInfo &TRI) {
1078   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1079     MachineOperand &MO = MI->getOperand(i);
1080     if (MO.isReg() && MO.getReg() == VirtReg)
1081       substitutePhysReg(MO, PhysReg, TRI);
1082   }
1083 }
1084 
1085 namespace {
1086 
1087 struct RefSorter {
1088   bool operator()(const std::pair<MachineInstr*, int> &A,
1089                   const std::pair<MachineInstr*, int> &B) {
1090     return A.second < B.second;
1091   }
1092 };
1093 
1094 // ***************************** //
1095 // Local Spiller Implementation  //
1096 // ***************************** //
1097 
1098 class LocalRewriter : public VirtRegRewriter {
1099   MachineRegisterInfo *MRI;
1100   const TargetRegisterInfo *TRI;
1101   const TargetInstrInfo *TII;
1102   VirtRegMap *VRM;
1103   LiveIntervals *LIs;
1104   BitVector AllocatableRegs;
1105   DenseMap<MachineInstr*, unsigned> DistanceMap;
1106   DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;
1107 
1108   MachineBasicBlock *MBB;       // Basic block currently being processed.
1109 
1110 public:
1111 
1112   bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
1113                             LiveIntervals* LIs);
1114 
1115 private:
1116   void EraseInstr(MachineInstr *MI) {
1117     VRM->RemoveMachineInstrFromMaps(MI);
1118     LIs->RemoveMachineInstrFromMaps(MI);
1119     MI->eraseFromParent();
1120   }
1121 
1122   bool OptimizeByUnfold2(unsigned VirtReg, int SS,
1123                          MachineBasicBlock::iterator &MII,
1124                          std::vector<MachineInstr*> &MaybeDeadStores,
1125                          AvailableSpills &Spills,
1126                          BitVector &RegKills,
1127                          std::vector<MachineOperand*> &KillOps);
1128 
1129   bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
1130                         std::vector<MachineInstr*> &MaybeDeadStores,
1131                         AvailableSpills &Spills,
1132                         BitVector &RegKills,
1133                         std::vector<MachineOperand*> &KillOps);
1134 
1135   bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
1136                            unsigned VirtReg, unsigned SrcReg, int SS,
1137                            AvailableSpills &Spills,
1138                            BitVector &RegKills,
1139                            std::vector<MachineOperand*> &KillOps,
1140                            const TargetRegisterInfo *TRI);
1141 
1142   void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
1143                            int Idx, unsigned PhysReg, int StackSlot,
1144                            const TargetRegisterClass *RC,
1145                            bool isAvailable, MachineInstr *&LastStore,
1146                            AvailableSpills &Spills,
1147                            SmallSet<MachineInstr*, 4> &ReMatDefs,
1148                            BitVector &RegKills,
1149                            std::vector<MachineOperand*> &KillOps);
1150 
1151   void TransferDeadness(unsigned Reg, BitVector &RegKills,
1152                         std::vector<MachineOperand*> &KillOps);
1153 
1154   bool InsertEmergencySpills(MachineInstr *MI);
1155 
1156   bool InsertRestores(MachineInstr *MI,
1157                       AvailableSpills &Spills,
1158                       BitVector &RegKills,
1159                       std::vector<MachineOperand*> &KillOps);
1160 
1161   bool InsertSpills(MachineInstr *MI);
1162 
1163   void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
1164                    std::vector<MachineInstr*> &MaybeDeadStores,
1165                    BitVector &RegKills,
1166                    ReuseInfo &ReusedOperands,
1167                    std::vector<MachineOperand*> &KillOps);
1168 
1169   void RewriteMBB(LiveIntervals *LIs,
1170                   AvailableSpills &Spills, BitVector &RegKills,
1171                   std::vector<MachineOperand*> &KillOps);
1172 };
1173 }
1174 
1175 bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
1176                                          LiveIntervals* lis) {
1177   MRI = &MF.getRegInfo();
1178   TRI = MF.getTarget().getRegisterInfo();
1179   TII = MF.getTarget().getInstrInfo();
1180   VRM = &vrm;
1181   LIs = lis;
1182   AllocatableRegs = TRI->getAllocatableSet(MF);
1183   DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
1184         << MF.getFunction()->getName() << "':\n");
1185   DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
1186         " reloads!) ****\n");
1187   DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
1188 
1189   // Spills - Keep track of which spilled values are available in physregs
1190   // so that we can choose to reuse the physregs instead of emitting
1191   // reloads. This is usually refreshed per basic block.
1192   AvailableSpills Spills(TRI, TII);
1193 
1194   // Keep track of kill information.
1195   BitVector RegKills(TRI->getNumRegs());
1196   std::vector<MachineOperand*> KillOps;
1197   KillOps.resize(TRI->getNumRegs(), NULL);
1198 
1199   // SingleEntrySuccs - Successor blocks which have a single predecessor.
1200   SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
1201   SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
1202 
1203   // Traverse the basic blocks depth first.
1204   MachineBasicBlock *Entry = MF.begin();
1205   SmallPtrSet<MachineBasicBlock*,16> Visited;
1206   for (df_ext_iterator<MachineBasicBlock*,
1207          SmallPtrSet<MachineBasicBlock*,16> >
1208          DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
1209        DFI != E; ++DFI) {
1210     MBB = *DFI;
1211     if (!EarlyVisited.count(MBB))
1212       RewriteMBB(LIs, Spills, RegKills, KillOps);
1213 
1214     // If this MBB is the only predecessor of a successor, keep the
1215     // availability information and visit it next.
1216     do {
1217       // Keep visiting single predecessor successor as long as possible.
1218       SinglePredSuccs.clear();
1219       findSinglePredSuccessor(MBB, SinglePredSuccs);
1220       if (SinglePredSuccs.empty())
1221         MBB = 0;
1222       else {
1223         // FIXME: There may be more than one successor, each of which has
1224         // MBB as its only predecessor.
1225         MBB = SinglePredSuccs[0];
1226         if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
1227           Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
1228           RewriteMBB(LIs, Spills, RegKills, KillOps);
1229         }
1230       }
1231     } while (MBB);
1232 
1233     // Clear the availability info.
1234     Spills.clear();
1235   }
1236 
1237   DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
1238   DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
1239 
1240   // Mark unused spill slots.
1241   MachineFrameInfo *MFI = MF.getFrameInfo();
1242   int SS = VRM->getLowSpillSlot();
1243   if (SS != VirtRegMap::NO_STACK_SLOT) {
1244     for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
1245       SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
1246       if (!VRM->isSpillSlotUsed(SS)) {
1247         MFI->RemoveStackObject(SS);
1248         for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
1249           MachineInstr *DVMI = DbgValues[j];
1250           DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
1251           EraseInstr(DVMI);
1252         }
1253         ++NumDSS;
1254       }
1255       DbgValues.clear();
1256     }
1257   }
1258   Slot2DbgValues.clear();
1259 
1260   return true;
1261 }
1262 
1263 /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
1264 /// a scratch register is available.
1265 ///     xorq  %r12<kill>, %r13
1266 ///     addq  %rax, -184(%rbp)
1267 ///     addq  %r13, -184(%rbp)
1268 /// ==>
1269 ///     xorq  %r12<kill>, %r13
1270 ///     movq  -184(%rbp), %r12
1271 ///     addq  %rax, %r12
1272 ///     addq  %r13, %r12
1273 ///     movq  %r12, -184(%rbp)
1274 bool LocalRewriter::
1275 OptimizeByUnfold2(unsigned VirtReg, int SS,
1276                   MachineBasicBlock::iterator &MII,
1277                   std::vector<MachineInstr*> &MaybeDeadStores,
1278                   AvailableSpills &Spills,
1279                   BitVector &RegKills,
1280                   std::vector<MachineOperand*> &KillOps) {
1281 
1282   MachineBasicBlock::iterator NextMII = llvm::next(MII);
1283   // Skip over dbg_value instructions.
1284   while (NextMII != MBB->end() && NextMII->isDebugValue())
1285     NextMII = llvm::next(NextMII);
1286   if (NextMII == MBB->end())
1287     return false;
1288 
1289   if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
1290     return false;
1291 
1292   // Now let's see if the last couple of instructions happen to have freed up
1293   // a register.
1294   const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1295   unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
1296   if (!PhysReg)
1297     return false;
1298 
1299   MachineFunction &MF = *MBB->getParent();
1300   TRI = MF.getTarget().getRegisterInfo();
1301   MachineInstr &MI = *MII;
1302   if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
1303     return false;
1304 
1305   // If the next instruction also folds the same SS modref and can be unfolded,
1306   // then it's worthwhile to issue a load from SS into the free register and
1307   // then unfold these instructions.
1308   if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
1309     return false;
1310 
1311   // Back-schedule reloads and remats.
1312   ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
1313 
1314   // Load from SS to the spare physical register.
1315   TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
1316   // This invalidates PhysReg.
1317   Spills.ClobberPhysReg(PhysReg);
1318   // Remember it's available.
1319   Spills.addAvailable(SS, PhysReg);
1320   MaybeDeadStores[SS] = NULL;
1321 
1322   // Unfold current MI.
1323   SmallVector<MachineInstr*, 4> NewMIs;
1324   if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
1325     llvm_unreachable("Unable to unfold the load / store folding instruction!");
1326   assert(NewMIs.size() == 1);
1327   AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
1328   VRM->transferRestorePts(&MI, NewMIs[0]);
1329   MII = MBB->insert(MII, NewMIs[0]);
1330   InvalidateKills(MI, TRI, RegKills, KillOps);
1331   EraseInstr(&MI);
1332   ++NumModRefUnfold;
1333 
1334   // Unfold next instructions that fold the same SS.
1335   do {
1336     MachineInstr &NextMI = *NextMII;
1337     NextMII = llvm::next(NextMII);
1338     NewMIs.clear();
1339     if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
1340       llvm_unreachable("Unable to unfold the load / store folding instruction!");
1341     assert(NewMIs.size() == 1);
1342     AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
1343     VRM->transferRestorePts(&NextMI, NewMIs[0]);
1344     MBB->insert(NextMII, NewMIs[0]);
1345     InvalidateKills(NextMI, TRI, RegKills, KillOps);
1346     EraseInstr(&NextMI);
1347     ++NumModRefUnfold;
1348     // Skip over dbg_value instructions.
1349     while (NextMII != MBB->end() && NextMII->isDebugValue())
1350       NextMII = llvm::next(NextMII);
1351     if (NextMII == MBB->end())
1352       break;
1353   } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
1354 
1355   // Store the value back into SS.
1356   TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
1357   MachineInstr *StoreMI = prior(NextMII);
1358   VRM->addSpillSlotUse(SS, StoreMI);
1359   VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1360 
1361   return true;
1362 }
1363 
1364 /// OptimizeByUnfold - Turn a store folding instruction into a load folding
1365 /// instruction. e.g.
1366 ///     xorl  %edi, %eax
1367 ///     movl  %eax, -32(%ebp)
1368 ///     movl  -36(%ebp), %eax
1369 ///     orl   %eax, -32(%ebp)
1370 /// ==>
1371 ///     xorl  %edi, %eax
1372 ///     orl   -36(%ebp), %eax
1373 ///     mov   %eax, -32(%ebp)
1374 /// This enables unfolding optimization for a subsequent instruction which will
1375 /// also eliminate the newly introduced store instruction.
1376 bool LocalRewriter::
1377 OptimizeByUnfold(MachineBasicBlock::iterator &MII,
1378                  std::vector<MachineInstr*> &MaybeDeadStores,
1379                  AvailableSpills &Spills,
1380                  BitVector &RegKills,
1381                  std::vector<MachineOperand*> &KillOps) {
1382   MachineFunction &MF = *MBB->getParent();
1383   MachineInstr &MI = *MII;
1384   unsigned UnfoldedOpc = 0;
1385   unsigned UnfoldPR = 0;
1386   unsigned UnfoldVR = 0;
1387   int FoldedSS = VirtRegMap::NO_STACK_SLOT;
1388   VirtRegMap::MI2VirtMapTy::const_iterator I, End;
1389   for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
1390     // Only transform a MI that folds a single register.
1391     if (UnfoldedOpc)
1392       return false;
1393     UnfoldVR = I->second.first;
1394     VirtRegMap::ModRef MR = I->second.second;
1395     // MI2VirtMap can be updated, which invalidates the iterator.
1396     // Increment the iterator first.
1397     ++I;
1398     if (VRM->isAssignedReg(UnfoldVR))
1399       continue;
1400     // If this reference is not a use, any previous store is now dead.
1401     // Otherwise, the store to this stack slot is not dead anymore.
1402     FoldedSS = VRM->getStackSlot(UnfoldVR);
1403     MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
1404     if (DeadStore && (MR & VirtRegMap::isModRef)) {
1405       unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
1406       if (!PhysReg || !DeadStore->readsRegister(PhysReg))
1407         continue;
1408       UnfoldPR = PhysReg;
1409       UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
1410                                                     false, true);
1411     }
1412   }
1413 
1414   if (!UnfoldedOpc) {
1415     if (!UnfoldVR)
1416       return false;
1417 
1418     // Look for other unfolding opportunities.
1419     return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
1420                              RegKills, KillOps);
1421   }
1422 
1423   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1424     MachineOperand &MO = MI.getOperand(i);
1425     if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
1426       continue;
1427     unsigned VirtReg = MO.getReg();
1428     if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
1429       continue;
1430     if (VRM->isAssignedReg(VirtReg)) {
1431       unsigned PhysReg = VRM->getPhys(VirtReg);
1432       if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
1433         return false;
1434     } else if (VRM->isReMaterialized(VirtReg))
1435       continue;
1436     int SS = VRM->getStackSlot(VirtReg);
1437     unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1438     if (PhysReg) {
1439       if (TRI->regsOverlap(PhysReg, UnfoldPR))
1440         return false;
1441       continue;
1442     }
1443     if (VRM->hasPhys(VirtReg)) {
1444       PhysReg = VRM->getPhys(VirtReg);
1445       if (!TRI->regsOverlap(PhysReg, UnfoldPR))
1446         continue;
1447     }
1448 
1449     // Ok, we'll need to reload the value into a register which makes
1450     // it impossible to perform the store unfolding optimization later.
1451     // Let's see if it is possible to fold the load if the store is
1452     // unfolded. This allows us to perform the store unfolding
1453     // optimization.
1454     SmallVector<MachineInstr*, 4> NewMIs;
1455     if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
1456       assert(NewMIs.size() == 1);
1457       MachineInstr *NewMI = NewMIs.back();
1458       MBB->insert(MII, NewMI);
1459       NewMIs.clear();
1460       int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
1461       assert(Idx != -1);
1462       SmallVector<unsigned, 1> Ops;
1463       Ops.push_back(Idx);
1464       MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
1465       NewMI->eraseFromParent();
1466       if (FoldedMI) {
1467         VRM->addSpillSlotUse(SS, FoldedMI);
1468         if (!VRM->hasPhys(UnfoldVR))
1469           VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
1470         VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1471         MII = FoldedMI;
1472         InvalidateKills(MI, TRI, RegKills, KillOps);
1473         EraseInstr(&MI);
1474         return true;
1475       }
1476     }
1477   }
1478 
1479   return false;
1480 }
1481 
1482 /// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
1483 /// SrcReg is r1 and it is tied to r0. Return true if, after commuting this
1484 /// instruction, it will be r0 = op r2, r1.
1485 static bool CommuteChangesDestination(MachineInstr *DefMI,
1486                                       const MCInstrDesc &MCID,
1487                                       unsigned SrcReg,
1488                                       const TargetInstrInfo *TII,
1489                                       unsigned &DstIdx) {
1490   if (MCID.getNumDefs() != 1 && MCID.getNumOperands() != 3)
1491     return false;
1492   if (!DefMI->getOperand(1).isReg() ||
1493       DefMI->getOperand(1).getReg() != SrcReg)
1494     return false;
1495   unsigned DefIdx;
1496   if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
1497     return false;
1498   unsigned SrcIdx1, SrcIdx2;
1499   if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
1500     return false;
1501   if (SrcIdx1 == 1 && SrcIdx2 == 2) {
1502     DstIdx = 2;
1503     return true;
1504   }
1505   return false;
1506 }
1507 
1508 /// CommuteToFoldReload -
1509 /// Look for
1510 /// r1 = load fi#1
1511 /// r1 = op r1, r2<kill>
1512 /// store r1, fi#1
1513 ///
1514 /// If op is commutable and r2 is killed, then we can xform these to
1515 /// r2 = op r2, fi#1
1516 /// store r2, fi#1
1517 bool LocalRewriter::
1518 CommuteToFoldReload(MachineBasicBlock::iterator &MII,
1519                     unsigned VirtReg, unsigned SrcReg, int SS,
1520                     AvailableSpills &Spills,
1521                     BitVector &RegKills,
1522                     std::vector<MachineOperand*> &KillOps,
1523                     const TargetRegisterInfo *TRI) {
1524   if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
1525     return false;
1526 
1527   MachineInstr &MI = *MII;
1528   MachineBasicBlock::iterator DefMII = prior(MII);
1529   MachineInstr *DefMI = DefMII;
1530   const MCInstrDesc &MCID = DefMI->getDesc();
1531   unsigned NewDstIdx;
1532   if (DefMII != MBB->begin() &&
1533       MCID.isCommutable() &&
1534       CommuteChangesDestination(DefMI, MCID, SrcReg, TII, NewDstIdx)) {
1535     MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
1536     unsigned NewReg = NewDstMO.getReg();
1537     if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
1538       return false;
1539     MachineInstr *ReloadMI = prior(DefMII);
1540     int FrameIdx;
1541     unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
1542     if (DestReg != SrcReg || FrameIdx != SS)
1543       return false;
1544     int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
1545     if (UseIdx == -1)
1546       return false;
1547     unsigned DefIdx;
1548     if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
1549       return false;
1550     assert(DefMI->getOperand(DefIdx).isReg() &&
1551            DefMI->getOperand(DefIdx).getReg() == SrcReg);
1552 
1553     // Now commute def instruction.
1554     MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
1555     if (!CommutedMI)
1556       return false;
1557     MBB->insert(MII, CommutedMI);
1558     SmallVector<unsigned, 1> Ops;
1559     Ops.push_back(NewDstIdx);
1560     MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
1561     // Not needed since foldMemoryOperand returns new MI.
1562     CommutedMI->eraseFromParent();
1563     if (!FoldedMI)
1564       return false;
1565 
1566     VRM->addSpillSlotUse(SS, FoldedMI);
1567     VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1568     // Insert new def MI and spill MI.
1569     const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1570     TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
1571     MII = prior(MII);
1572     MachineInstr *StoreMI = MII;
1573     VRM->addSpillSlotUse(SS, StoreMI);
1574     VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1575     MII = FoldedMI;  // Update MII to backtrack.
1576 
1577     // Delete all 3 old instructions.
1578     InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
1579     EraseInstr(ReloadMI);
1580     InvalidateKills(*DefMI, TRI, RegKills, KillOps);
1581     EraseInstr(DefMI);
1582     InvalidateKills(MI, TRI, RegKills, KillOps);
1583     EraseInstr(&MI);
1584 
1585     // If NewReg was previously holding the value of some SS, it's now clobbered.
1586     // This has to be done now because it's a physical register. When this
1587     // instruction is re-visited, it's ignored.
1588     Spills.ClobberPhysReg(NewReg);
1589 
1590     ++NumCommutes;
1591     return true;
1592   }
1593 
1594   return false;
1595 }
1596 
1597 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
1598 /// the last store to the same slot is now dead. If so, remove the last store.
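/// Illustration only (hypothetical registers and slot, not from this file):
///     store %rax -> SS#1      ; earlier store, recorded in LastStore
///     ...                     ; SS#1 neither read nor modified here
///     store %rcx -> SS#1      ; new spill makes the earlier store dead
/// The earlier store is deleted and NumDSE is bumped.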
1599 void LocalRewriter::
1600 SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
1601                     int Idx, unsigned PhysReg, int StackSlot,
1602                     const TargetRegisterClass *RC,
1603                     bool isAvailable, MachineInstr *&LastStore,
1604                     AvailableSpills &Spills,
1605                     SmallSet<MachineInstr*, 4> &ReMatDefs,
1606                     BitVector &RegKills,
1607                     std::vector<MachineOperand*> &KillOps) {
1608 
1609   MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1610   TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
1611                            TRI);
1612   MachineInstr *StoreMI = prior(oldNextMII);
1613   VRM->addSpillSlotUse(StackSlot, StoreMI);
1614   DEBUG(dbgs() << "Store:\t" << *StoreMI);
1615 
1616   // If there is a dead store to this stack slot, nuke it now.
1617   if (LastStore) {
1618     DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
1619     ++NumDSE;
1620     SmallVector<unsigned, 2> KillRegs;
1621     InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
1622     MachineBasicBlock::iterator PrevMII = LastStore;
1623     bool CheckDef = PrevMII != MBB->begin();
1624     if (CheckDef)
1625       --PrevMII;
1626     EraseInstr(LastStore);
1627     if (CheckDef) {
1628       // Look at defs of killed registers on the store. Mark the defs
1629       // as dead since the store has been deleted and they aren't
1630       // being reused.
1631       for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
1632         bool HasOtherDef = false;
1633         if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
1634           MachineInstr *DeadDef = PrevMII;
1635           if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
1636             // FIXME: This assumes a remat def does not have side effects.
1637             EraseInstr(DeadDef);
1638             ++NumDRM;
1639           }
1640         }
1641       }
1642     }
1643   }
1644 
1645   // Allow for multi-instruction spill sequences, as on PPC Altivec.  Presume
1646   // the last of multiple instructions is the actual store.
1647   LastStore = prior(oldNextMII);
1648 
1649   // If the stack slot value was previously available in some other
1650   // register, change it now.  Otherwise, make the register available
1651   // in PhysReg.
1652   Spills.ModifyStackSlotOrReMat(StackSlot);
1653   Spills.ClobberPhysReg(PhysReg);
1654   Spills.addAvailable(StackSlot, PhysReg, isAvailable);
1655   ++NumStores;
1656 }
1657 
1658 /// isSafeToDelete - Return true if this instruction doesn't produce any side
1659 /// effects and all of its defs are dead.
1660 static bool isSafeToDelete(MachineInstr &MI) {
1661   const MCInstrDesc &MCID = MI.getDesc();
1662   if (MCID.mayLoad() || MCID.mayStore() || MCID.isTerminator() ||
1663       MCID.isCall() || MCID.isBarrier() || MCID.isReturn() ||
1664       MI.isLabel() || MI.isDebugValue() ||
1665       MI.hasUnmodeledSideEffects())
1666     return false;
1667 
1668   // Technically speaking, inline asm with no side effects and no defs can
1669   // still be deleted. But there is so much bad inline asm code out there that
1670   // we should let it be.
1671   if (MI.isInlineAsm())
1672     return false;
1673 
1674   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1675     MachineOperand &MO = MI.getOperand(i);
1676     if (!MO.isReg() || !MO.getReg())
1677       continue;
1678     if (MO.isDef() && !MO.isDead())
1679       return false;
1680     if (MO.isUse() && MO.isKill())
1681       // FIXME: We can't remove kill markers or else the scavenger will assert.
1682       // An alternative is to add a ADD pseudo instruction to replace kill
1683       // markers.
1684       return false;
1685   }
1686   return true;
1687 }
1688 
1689 /// TransferDeadness - An identity copy definition is dead and it's being
1690 /// removed. Find the last def or use and mark it as dead / kill.
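/// Illustration only (hypothetical vreg, not from this file): when a dead
/// "%r1025 = COPY %r1025" is removed, the closest earlier reference to %r1025
/// in this block is given a kill marker if it is a use, or marked dead (or
/// deleted, when side-effect free) if it is a def.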
1691 void LocalRewriter::
1692 TransferDeadness(unsigned Reg, BitVector &RegKills,
1693                  std::vector<MachineOperand*> &KillOps) {
1694   SmallPtrSet<MachineInstr*, 4> Seens;
1695   SmallVector<std::pair<MachineInstr*, int>,8> Refs;
1696   for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
1697          RE = MRI->reg_end(); RI != RE; ++RI) {
1698     MachineInstr *UDMI = &*RI;
1699     if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
1700       continue;
1701     DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
1702     if (DI == DistanceMap.end())
1703       continue;
1704     if (Seens.insert(UDMI))
1705       Refs.push_back(std::make_pair(UDMI, DI->second));
1706   }
1707 
1708   if (Refs.empty())
1709     return;
1710   std::sort(Refs.begin(), Refs.end(), RefSorter());
1711 
1712   while (!Refs.empty()) {
1713     MachineInstr *LastUDMI = Refs.back().first;
1714     Refs.pop_back();
1715 
1716     MachineOperand *LastUD = NULL;
1717     for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
1718       MachineOperand &MO = LastUDMI->getOperand(i);
1719       if (!MO.isReg() || MO.getReg() != Reg)
1720         continue;
1721       if (!LastUD || (LastUD->isUse() && MO.isDef()))
1722         LastUD = &MO;
1723       if (LastUDMI->isRegTiedToDefOperand(i))
1724         break;
1725     }
1726     if (LastUD->isDef()) {
1727       // If the instruction has no side effect, delete it and propagate
1728       // backward further. Otherwise, mark it dead and we are done.
1729       if (!isSafeToDelete(*LastUDMI)) {
1730         LastUD->setIsDead();
1731         break;
1732       }
1733       EraseInstr(LastUDMI);
1734     } else {
1735       LastUD->setIsKill();
1736       RegKills.set(Reg);
1737       KillOps[Reg] = LastUD;
1738       break;
1739     }
1740   }
1741 }
1742 
1743 /// InsertEmergencySpills - Insert emergency spills before MI if requested by
1744 /// VRM. Return true if spills were inserted.
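/// Illustration only (hypothetical physreg and slot, not from this file): an
/// emergency spill of %rax to SS#3 around MI is emitted roughly as
///     store %rax -> SS#3      ; inserted before MI
///     <MI>
///     %rax = load SS#3        ; reload, possibly back-scheduled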
1745 bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
1746   if (!VRM->hasEmergencySpills(MI))
1747     return false;
1748   MachineBasicBlock::iterator MII = MI;
1749   SmallSet<int, 4> UsedSS;
1750   std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
1751   for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
1752     unsigned PhysReg = EmSpills[i];
1753     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
1754     assert(RC && "Unable to determine register class!");
1755     int SS = VRM->getEmergencySpillSlot(RC);
1756     if (UsedSS.count(SS))
1757       llvm_unreachable("Need to spill more than one physical register!");
1758     UsedSS.insert(SS);
1759     TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
1760     MachineInstr *StoreMI = prior(MII);
1761     VRM->addSpillSlotUse(SS, StoreMI);
1762 
1763     // Back-schedule reloads and remats.
1764     MachineBasicBlock::iterator InsertLoc =
1765       ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
1766                        TII, *MBB->getParent());
1767 
1768     TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);
1769 
1770     MachineInstr *LoadMI = prior(InsertLoc);
1771     VRM->addSpillSlotUse(SS, LoadMI);
1772     ++NumPSpills;
1773     DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1774   }
1775   return true;
1776 }
1777 
1778 /// InsertRestores - Restore registers before MI if requested by VRM. Return
1779 /// true if any instructions were inserted.
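/// Illustration only (hypothetical registers, not from this file): for a
/// restore point that wants %v1 in %rdi, either the value is already in %rdi
/// (nothing is emitted), it is available in another physreg (a COPY into %rdi
/// is emitted), or a reload / remat into %rdi is inserted before MI.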
1780 bool LocalRewriter::InsertRestores(MachineInstr *MI,
1781                                    AvailableSpills &Spills,
1782                                    BitVector &RegKills,
1783                                    std::vector<MachineOperand*> &KillOps) {
1784   if (!VRM->isRestorePt(MI))
1785     return false;
1786   MachineBasicBlock::iterator MII = MI;
1787   std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
1788   for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
1789     unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
1790     if (!VRM->getPreSplitReg(VirtReg))
1791       continue; // Split interval spilled again.
1792     unsigned Phys = VRM->getPhys(VirtReg);
1793     MRI->setPhysRegUsed(Phys);
1794 
1795     // Check if the value being restored is available. If so, it must be
1796     // from a predecessor BB that falls through into this BB. We do not
1797     // expect:
1798     // BB1:
1799     // r1 = load fi#1
1800     // ...
1801     //    = r1<kill>
1802     // ... # r1 not clobbered
1803     // ...
1804     //    = load fi#1
1805     bool DoReMat = VRM->isReMaterialized(VirtReg);
1806     int SSorRMId = DoReMat
1807       ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
1808     unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1809     if (InReg == Phys) {
1810       // If the value is already available in the expected register, save
1811       // a reload / remat.
1812       if (SSorRMId)
1813         DEBUG(dbgs() << "Reusing RM#"
1814                      << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1815       else
1816         DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1817       DEBUG(dbgs() << " from physreg "
1818                    << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
1819                    <<" instead of reloading into physreg "
1820                    << TRI->getName(Phys) << '\n');
1821 
1822       // Reusing a physreg may resurrect it. But we expect ProcessUses to update
1823       // the kill flags for the current instruction after processing it.
1824 
1825       ++NumOmitted;
1826       continue;
1827     } else if (InReg && InReg != Phys) {
1828       if (SSorRMId)
1829         DEBUG(dbgs() << "Reusing RM#"
1830                      << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
1831       else
1832         DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
1833       DEBUG(dbgs() << " from physreg "
1834                    << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
1835                    <<" by copying it into physreg "
1836                    << TRI->getName(Phys) << '\n');
1837 
1838       // If the reloaded / remat value is available in another register,
1839       // copy it to the desired register.
1840 
1841       // Back-schedule reloads and remats.
1842       MachineBasicBlock::iterator InsertLoc =
1843         ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1844                          *MBB->getParent());
1845       MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
1846                                      TII->get(TargetOpcode::COPY), Phys)
1847                                .addReg(InReg, RegState::Kill);
1848 
1849       // This invalidates Phys.
1850       Spills.ClobberPhysReg(Phys);
1851       // Remember it's available.
1852       Spills.addAvailable(SSorRMId, Phys);
1853 
1854       CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1855       UpdateKills(*CopyMI, TRI, RegKills, KillOps);
1856 
1857       DEBUG(dbgs() << '\t' << *CopyMI);
1858       ++NumCopified;
1859       continue;
1860     }
1861 
1862     // Back-schedule reloads and remats.
1863     MachineBasicBlock::iterator InsertLoc =
1864       ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
1865                        *MBB->getParent());
1866 
1867     if (VRM->isReMaterialized(VirtReg)) {
1868       ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
1869     } else {
1870       const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
1871       TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
1872       MachineInstr *LoadMI = prior(InsertLoc);
1873       VRM->addSpillSlotUse(SSorRMId, LoadMI);
1874       ++NumLoads;
1875       DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
1876     }
1877 
1878     // This invalidates Phys.
1879     Spills.ClobberPhysReg(Phys);
1880     // Remember it's available.
1881     Spills.addAvailable(SSorRMId, Phys);
1882 
1883     UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
1884     DEBUG(dbgs() << '\t' << *prior(MII));
1885   }
1886   return true;
1887 }
1888 
1889 /// InsertSpills - Insert spills after MI if requested by VRM. Return
1890 /// true if spills were inserted.
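/// Illustration only (hypothetical register and slot, not from this file):
/// for a spill point where %v1 is assigned %rbx and stack slot SS#4, a
/// "store %rbx -> SS#4" is emitted immediately after MI.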
1891 bool LocalRewriter::InsertSpills(MachineInstr *MI) {
1892   if (!VRM->isSpillPt(MI))
1893     return false;
1894   MachineBasicBlock::iterator MII = MI;
1895   std::vector<std::pair<unsigned,bool> > &SpillRegs =
1896     VRM->getSpillPtSpills(MI);
1897   for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1898     unsigned VirtReg = SpillRegs[i].first;
1899     bool isKill = SpillRegs[i].second;
1900     if (!VRM->getPreSplitReg(VirtReg))
1901       continue; // Split interval spilled again.
1902     const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
1903     unsigned Phys = VRM->getPhys(VirtReg);
1904     int StackSlot = VRM->getStackSlot(VirtReg);
1905     MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
1906     TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
1907                              RC, TRI);
1908     MachineInstr *StoreMI = prior(oldNextMII);
1909     VRM->addSpillSlotUse(StackSlot, StoreMI);
1910     DEBUG(dbgs() << "Store:\t" << *StoreMI);
1911     VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1912   }
1913   return true;
1914 }
1915 
1916 
1917 /// ProcessUses - Process all of MI's spilled operands and all available
1918 /// operands.
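/// Illustration only (hypothetical registers, not from this file): for
/// "op %v1, %v2" where %v1's stack slot value is already available in %r0 and
/// reuse is legal, no reload is emitted and the operand is rewritten to %r0;
/// otherwise a reload (or remat) into %v1's assigned physreg is inserted
/// before the instruction.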
1919 void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
1920                                 std::vector<MachineInstr*> &MaybeDeadStores,
1921                                 BitVector &RegKills,
1922                                 ReuseInfo &ReusedOperands,
1923                                 std::vector<MachineOperand*> &KillOps) {
1924   // Clear kill info.
1925   SmallSet<unsigned, 2> KilledMIRegs;
1926   SmallVector<unsigned, 4> VirtUseOps;
1927   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1928     MachineOperand &MO = MI.getOperand(i);
1929     if (!MO.isReg() || MO.getReg() == 0)
1930       continue;   // Ignore non-register operands.
1931 
1932     unsigned VirtReg = MO.getReg();
1933 
1934     if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1935       // Ignore physregs for spilling, but remember that it is used by this
1936       // function.
1937       MRI->setPhysRegUsed(VirtReg);
1938       continue;
1939     }
1940 
1941     // We want to process implicit virtual register uses first.
1942     if (MO.isImplicit())
1943       // If the virtual register is implicitly defined, emit an implicit_def
1944       // before it so the scavenger knows it's "defined".
1945       // FIXME: This is a horrible hack done by the register allocator to
1946       // remat a definition with a virtual register operand.
1947       VirtUseOps.insert(VirtUseOps.begin(), i);
1948     else
1949       VirtUseOps.push_back(i);
1950 
1951     // A partial def causes problems because the same operand both reads and
1952     // writes the register. This rewriter is designed to rewrite uses and defs
1953     // separately, so a partial def would already have been rewritten to a
1954     // physreg by the time we get to processing defs.
1955     // Add an implicit use operand to model the partial def.
1956     if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
1957         MI.findRegisterUseOperandIdx(VirtReg) == -1) {
1958       VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
1959       MI.addOperand(MachineOperand::CreateReg(VirtReg,
1960                                               false,  // isDef
1961                                               true)); // isImplicit
1962       DEBUG(dbgs() << "Partial redef: " << MI);
1963     }
1964   }
1965 
1966   // Process all of the spilled uses and all non spilled reg references.
1967   SmallVector<int, 2> PotentialDeadStoreSlots;
1968   KilledMIRegs.clear();
1969   for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1970     unsigned i = VirtUseOps[j];
1971     unsigned VirtReg = MI.getOperand(i).getReg();
1972     assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1973            "Not a virtual register?");
1974 
1975     unsigned SubIdx = MI.getOperand(i).getSubReg();
1976     if (VRM->isAssignedReg(VirtReg)) {
1977       // This virtual register was assigned a physreg!
1978       unsigned Phys = VRM->getPhys(VirtReg);
1979       MRI->setPhysRegUsed(Phys);
1980       if (MI.getOperand(i).isDef())
1981         ReusedOperands.markClobbered(Phys);
1982       substitutePhysReg(MI.getOperand(i), Phys, *TRI);
1983       if (VRM->isImplicitlyDefined(VirtReg))
1984         // FIXME: Is this needed?
1985         BuildMI(*MBB, &MI, MI.getDebugLoc(),
1986                 TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
1987       continue;
1988     }
1989 
1990     // This virtual register is now known to be a spilled value.
1991     if (!MI.getOperand(i).isUse())
1992       continue;  // Handle defs in the loop below (handle use&def here though)
1993 
1994     bool AvoidReload = MI.getOperand(i).isUndef();
1995     // Check if it is defined by an implicit def. It should not be spilled.
1996     // Note, this is for correctness reason. e.g.
1997     // 8   %reg1024<def> = IMPLICIT_DEF
1998     // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1999     // The live range [12, 14) is not part of the r1024 live interval since
2000     // it's defined by an implicit def. It will not conflict with the live
2001     // interval of r1025. Now suppose both registers are spilled; you can
2002     // easily see a situation where both registers are reloaded before
2003     // the INSERT_SUBREG and both target registers that would overlap.
2004     bool DoReMat = VRM->isReMaterialized(VirtReg);
2005     int SSorRMId = DoReMat
2006       ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
2007     int ReuseSlot = SSorRMId;
2008 
2009     // Check to see if this stack slot is available.
2010     unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
2011 
2012     // If this is a sub-register use, make sure the reuse register is in the
2013     // right register class. For example, for x86 not all of the 32-bit
2014     // registers have accessible sub-registers.
2015     // Similarly so for EXTRACT_SUBREG. Consider this:
2016     // EDI = op
2017     // MOV32_mr fi#1, EDI
2018     // ...
2019     //       = EXTRACT_SUBREG fi#1
2020     // fi#1 is available in EDI, but it cannot be reused because it's not in
2021     // the right register file.
2022     if (PhysReg && !AvoidReload && SubIdx) {
2023       const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
2024       if (!RC->contains(PhysReg))
2025         PhysReg = 0;
2026     }
2027 
2028     if (PhysReg && !AvoidReload) {
2029       // This spilled operand might be part of a two-address operand.  If this
2030       // is the case, then changing it will necessarily require changing the
2031       // def part of the instruction as well.  However, in some cases, we
2032       // aren't allowed to modify the reused register.  If none of these cases
2033       // apply, reuse it.
2034       bool CanReuse = true;
2035       bool isTied = MI.isRegTiedToDefOperand(i);
2036       if (isTied) {
2037         // Okay, we have a two address operand.  We can reuse this physreg as
2038         // long as we are allowed to clobber the value and there isn't an
2039         // earlier def that has already clobbered the physreg.
2040         CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
2041           Spills.canClobberPhysReg(PhysReg);
2042       }
2043       // If this is an asm, and a PhysReg alias is used elsewhere as an
2044       // earlyclobber operand, we can't also use it as an input.
2045       if (MI.isInlineAsm()) {
2046         for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
2047           MachineOperand &MOk = MI.getOperand(k);
2048           if (MOk.isReg() && MOk.isEarlyClobber() &&
2049               TRI->regsOverlap(MOk.getReg(), PhysReg)) {
2050             CanReuse = false;
2051             DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
2052                          << " for " << PrintReg(VirtReg) << ": " << MOk
2053                          << '\n');
2054             break;
2055           }
2056         }
2057       }
2058 
2059       if (CanReuse) {
2060         // If this stack slot value is already available, reuse it!
2061         if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2062           DEBUG(dbgs() << "Reusing RM#"
2063                 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2064         else
2065           DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2066         DEBUG(dbgs() << " from physreg "
2067               << TRI->getName(PhysReg) << " for " << PrintReg(VirtReg)
2068               << " instead of reloading into "
2069               << PrintReg(VRM->getPhys(VirtReg), TRI) << '\n');
2070         unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2071         MI.getOperand(i).setReg(RReg);
2072         MI.getOperand(i).setSubReg(0);
2073 
2074         // Reusing a physreg may resurrect it. But we expect ProcessUses to
2075         // update the kill flags for the current instr after processing it.
2076 
2077         // The only technical detail we have is that we don't know that
2078         // PhysReg won't be clobbered by a reloaded stack slot that occurs
2079         // later in the instruction.  In particular, consider 'op V1, V2'.
2080         // If V1 is available in physreg R0, we would choose to reuse it
2081         // here, instead of reloading it into the register the allocator
2082         // indicated (say R1).  However, V2 might have to be reloaded
2083         // later, and it might indicate that it needs to live in R0.  When
2084         // this occurs, we need to have information available that
2085         // indicates it is safe to use R1 for the reload instead of R0.
2086         //
2087         // To further complicate matters, we might conflict with an alias,
2088         // or R0 and R1 might not be compatible with each other.  In this
2089         // case, we actually insert a reload for V1 in R1, ensuring that
2090         // we can get at R0 or its alias.
2091         ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
2092                                 VRM->getPhys(VirtReg), VirtReg);
2093         if (isTied)
2094           // Only mark it clobbered if this is a use&def operand.
2095           ReusedOperands.markClobbered(PhysReg);
2096         ++NumReused;
2097 
2098         if (MI.getOperand(i).isKill() &&
2099             ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
2100 
2101           // The store of this spilled value is potentially dead, but we
2102           // won't know for certain until we've confirmed that the re-use
2103           // above is valid, which means waiting until the other operands
2104           // are processed. For now we just track the spill slot, we'll
2105           // remove it after the other operands are processed if valid.
2106 
2107           PotentialDeadStoreSlots.push_back(ReuseSlot);
2108         }
2109 
2110         // Mark it isKill if there are no other uses of the same virtual
2111         // register and it's not a two-address operand. IsKill will be
2112         // unset if the reg is reused.
2113         if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
2114           MI.getOperand(i).setIsKill();
2115           KilledMIRegs.insert(VirtReg);
2116         }
2117         continue;
2118       }  // CanReuse
2119 
2120       // Otherwise we have a situation where we have a two-address instruction
2121       // whose mod/ref operand needs to be reloaded.  This reload is already
2122       // available in some register "PhysReg", but if we used PhysReg as the
2123       // operand to our 2-addr instruction, the instruction would modify
2124       // PhysReg.  This isn't cool if something later uses PhysReg and expects
2125       // to get its initial value.
2126       //
2127       // To avoid this problem, and to avoid doing a load right after a store,
2128       // we emit a copy from PhysReg into the designated register for this
2129       // operand.
2130       //
2131       // This case also applies to an earlyclobber'd PhysReg.
2132       unsigned DesignatedReg = VRM->getPhys(VirtReg);
2133       assert(DesignatedReg && "Must map virtreg to physreg!");
2134 
2135       // Note that, if we reused a register for a previous operand, the
2136       // register we want to reload into might not actually be
2137       // available.  If this occurs, use the register indicated by the
2138       // reuser.
2139       if (ReusedOperands.hasReuses())
2140         DesignatedReg = ReusedOperands.
2141           GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
2142                           MaybeDeadStores, RegKills, KillOps, *VRM);
2143 
2144       // If the mapped designated register is actually the physreg we have
2145       // incoming, we don't need to insert a dead copy.
2146       if (DesignatedReg == PhysReg) {
2147         // If this stack slot value is already available, reuse it!
2148         if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
2149           DEBUG(dbgs() << "Reusing RM#"
2150                 << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
2151         else
2152           DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
2153         DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
2154               << " for " << PrintReg(VirtReg)
2155               << " instead of reloading into same physreg.\n");
2156         unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2157         MI.getOperand(i).setReg(RReg);
2158         MI.getOperand(i).setSubReg(0);
2159         ReusedOperands.markClobbered(RReg);
2160         ++NumReused;
2161         continue;
2162       }
2163 
2164       MRI->setPhysRegUsed(DesignatedReg);
2165       ReusedOperands.markClobbered(DesignatedReg);
2166 
2167       // Back-schedule reloads and remats.
2168       MachineBasicBlock::iterator InsertLoc =
2169         ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
2170                          SSorRMId, TII, *MBB->getParent());
2171       MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
2172                                      TII->get(TargetOpcode::COPY),
2173                                      DesignatedReg).addReg(PhysReg);
2174       CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2175       UpdateKills(*CopyMI, TRI, RegKills, KillOps);
2176 
2177       // This invalidates DesignatedReg.
2178       Spills.ClobberPhysReg(DesignatedReg);
2179 
2180       Spills.addAvailable(ReuseSlot, DesignatedReg);
2181       unsigned RReg =
2182         SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
2183       MI.getOperand(i).setReg(RReg);
2184       MI.getOperand(i).setSubReg(0);
2185       DEBUG(dbgs() << '\t' << *prior(InsertLoc));
2186       ++NumReused;
2187       continue;
2188     } // if (PhysReg)
2189 
2190     // Otherwise, reload it and remember that we have it.
2191     PhysReg = VRM->getPhys(VirtReg);
2192     assert(PhysReg && "Must map virtreg to physreg!");
2193 
2194     // Note that, if we reused a register for a previous operand, the
2195     // register we want to reload into might not actually be
2196     // available.  If this occurs, use the register indicated by the
2197     // reuser.
2198     if (ReusedOperands.hasReuses())
2199       PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
2200                   Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
2201 
2202     MRI->setPhysRegUsed(PhysReg);
2203     ReusedOperands.markClobbered(PhysReg);
2204     if (AvoidReload)
2205       ++NumAvoided;
2206     else {
2207       // Back-schedule reloads and remats.
2208       MachineBasicBlock::iterator InsertLoc =
2209         ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
2210                          SSorRMId, TII, *MBB->getParent());
2211 
2212       if (DoReMat) {
2213         ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
2214       } else {
2215         const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
2216         TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
2217         MachineInstr *LoadMI = prior(InsertLoc);
2218         VRM->addSpillSlotUse(SSorRMId, LoadMI);
2219         ++NumLoads;
2220         DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
2221       }
2222       // This invalidates PhysReg.
2223       Spills.ClobberPhysReg(PhysReg);
2224 
2225       // Any stores to this stack slot are not dead anymore.
2226       if (!DoReMat)
2227         MaybeDeadStores[SSorRMId] = NULL;
2228       Spills.addAvailable(SSorRMId, PhysReg);
2229       // Assumes this is the last use. IsKill will be unset if reg is reused
2230       // unless it's a two-address operand.
2231       if (!MI.isRegTiedToDefOperand(i) &&
2232           KilledMIRegs.count(VirtReg) == 0) {
2233         MI.getOperand(i).setIsKill();
2234         KilledMIRegs.insert(VirtReg);
2235       }
2236 
2237       UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
2238       DEBUG(dbgs() << '\t' << *prior(InsertLoc));
2239     }
2240     unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2241     MI.getOperand(i).setReg(RReg);
2242     MI.getOperand(i).setSubReg(0);
2243   }
2244 
2245   // Ok - now we can remove stores that have been confirmed dead.
2246   for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
2247     // This was the last use and the spilled value is still available
2248     // for reuse. That means the spill was unnecessary!
2249     int PDSSlot = PotentialDeadStoreSlots[j];
2250     MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
2251     if (DeadStore) {
2252       DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2253       InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2254       EraseInstr(DeadStore);
2255       MaybeDeadStores[PDSSlot] = NULL;
2256       ++NumDSE;
2257     }
2258   }
2259 }
2260 
2261 /// rewriteMBB - Keep track of which spills are available even after the
2262 /// register allocator is done with them.  If possible, avoid reloading vregs.
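/// Illustration only (assumed summary of the code below, not an authoritative
/// spec): each instruction is handled roughly in this order:
///   1. unfolding opportunities and VRM-requested emergency spills, restores
///      and spills are materialized around it;
///   2. spilled uses are rewritten, reusing available physregs where legal;
///   3. folded memory references update dead-store and availability tracking;
///   4. spilled defs are rewritten and their stores emitted.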
2263 void
2264 LocalRewriter::RewriteMBB(LiveIntervals *LIs,
2265                           AvailableSpills &Spills, BitVector &RegKills,
2266                           std::vector<MachineOperand*> &KillOps) {
2267 
2268   DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
2269                << MBB->getName() << "':\n");
2270 
2271   MachineFunction &MF = *MBB->getParent();
2272 
2273   // MaybeDeadStores - When we need to write a value back into a stack slot,
2274   // keep track of the inserted store.  If the stack slot value is never read
2275   // (because the value was used from some available register, for example), and
2276   // subsequently stored to, the original store is dead.  This map keeps track
2277   // of inserted stores that are not used.  If we see a subsequent store to the
2278   // same stack slot, the original store is deleted.
2279   std::vector<MachineInstr*> MaybeDeadStores;
2280   MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
2281 
2282   // ReMatDefs - These are rematerializable def MIs which are not deleted.
2283   SmallSet<MachineInstr*, 4> ReMatDefs;
2284 
2285   // Keep track of the registers we have already spilled in case there are
2286   // multiple defs of the same register in MI.
2287   SmallSet<unsigned, 8> SpilledMIRegs;
2288 
2289   RegKills.reset();
2290   KillOps.clear();
2291   KillOps.resize(TRI->getNumRegs(), NULL);
2292 
2293   DistanceMap.clear();
2294   for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
2295        MII != E; ) {
2296     MachineBasicBlock::iterator NextMII = llvm::next(MII);
2297 
2298     if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
2299       NextMII = llvm::next(MII);
2300 
2301     if (InsertEmergencySpills(MII))
2302       NextMII = llvm::next(MII);
2303 
2304     InsertRestores(MII, Spills, RegKills, KillOps);
2305 
2306     if (InsertSpills(MII))
2307       NextMII = llvm::next(MII);
2308 
2309     bool Erased = false;
2310     bool BackTracked = false;
2311     MachineInstr &MI = *MII;
2312 
2313     // Remember DbgValue's which reference stack slots.
2314     if (MI.isDebugValue() && MI.getOperand(0).isFI())
2315       Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);
2316 
2317     /// ReusedOperands - Keep track of operand reuse in case we need to undo
2318     /// reuse.
2319     ReuseInfo ReusedOperands(MI, TRI);
2320 
2321     ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);
2322 
2323     DEBUG(dbgs() << '\t' << MI);
2324 
2325 
2326     // If we have folded references to memory operands, make sure we clear all
2327     // physical registers that may contain the value of the spilled virtual
2328     // register.
2329 
2330     // Copy the folded virts to a small vector, we may change MI2VirtMap.
2331     SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
2332     // C++0x FTW!
2333     for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
2334                    VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
2335            VRM->getFoldedVirts(&MI);
2336          FVRange.first != FVRange.second; ++FVRange.first)
2337       FoldedVirts.push_back(FVRange.first->second);
2338 
2339     SmallSet<int, 2> FoldedSS;
2340     for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
2341       unsigned VirtReg = FoldedVirts[FVI].first;
2342       VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
2343       DEBUG(dbgs() << "Folded " << PrintReg(VirtReg) << "  MR: " << MR);
2344 
2345       int SS = VRM->getStackSlot(VirtReg);
2346       if (SS == VirtRegMap::NO_STACK_SLOT)
2347         continue;
2348       FoldedSS.insert(SS);
2349       DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
2350 
2351       // If this folded instruction is just a use, check to see if it's a
2352       // straight load from the virt reg slot.
2353       if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
2354         int FrameIdx;
2355         unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
2356         if (DestReg && FrameIdx == SS) {
2357           // If this spill slot is available, turn it into a copy (or nothing)
2358           // instead of leaving it as a load!
2359           if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
2360             DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
2361             if (DestReg != InReg) {
2362               MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
2363               MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
2364                                              TII->get(TargetOpcode::COPY))
2365                 .addReg(DestReg, RegState::Define, DefMO->getSubReg())
2366                 .addReg(InReg, RegState::Kill);
2367               // Revisit the copy so we make sure to notice the effects of the
2368               // operation on the destreg (either needing to RA it if it's
2369               // virtual or needing to clobber any values if it's physical).
2370               NextMII = CopyMI;
2371               NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
2372               BackTracked = true;
2373             } else {
2374               DEBUG(dbgs() << "Removing now-noop copy: " << MI);
2375               // InvalidateKills resurrects any prior kill of the copy's source
2376               // allowing the source reg to be reused in place of the copy.
2377               Spills.disallowClobberPhysReg(InReg);
2378             }
2379 
2380             InvalidateKills(MI, TRI, RegKills, KillOps);
2381             EraseInstr(&MI);
2382             Erased = true;
2383             goto ProcessNextInst;
2384           }
2385         } else {
2386           unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2387           SmallVector<MachineInstr*, 4> NewMIs;
2388           if (PhysReg &&
2389               TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
2390             MBB->insert(MII, NewMIs[0]);
2391             InvalidateKills(MI, TRI, RegKills, KillOps);
2392             EraseInstr(&MI);
2393             Erased = true;
2394             --NextMII;  // backtrack to the unfolded instruction.
2395             BackTracked = true;
2396             goto ProcessNextInst;
2397           }
2398         }
2399       }
2400 
2401       // If this reference is not a use, any previous store is now dead.
2402       // Otherwise, the store to this stack slot is not dead anymore.
2403       MachineInstr* DeadStore = MaybeDeadStores[SS];
2404       if (DeadStore) {
2405         bool isDead = !(MR & VirtRegMap::isRef);
2406         MachineInstr *NewStore = NULL;
2407         if (MR & VirtRegMap::isModRef) {
2408           unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
2409           SmallVector<MachineInstr*, 4> NewMIs;
2410           // We can reuse this physreg as long as we are allowed to clobber
2411           // the value and there isn't an earlier def that has already clobbered
2412           // the physreg.
2413           if (PhysReg &&
2414               !ReusedOperands.isClobbered(PhysReg) &&
2415               Spills.canClobberPhysReg(PhysReg) &&
2416               !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
2417             MachineOperand *KillOpnd =
2418               DeadStore->findRegisterUseOperand(PhysReg, true);
2419             // Note, if the store is storing a sub-register, it's possible the
2420             // super-register is needed below.
2421             if (KillOpnd && !KillOpnd->getSubReg() &&
2422                 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
2423               MBB->insert(MII, NewMIs[0]);
2424               NewStore = NewMIs[1];
2425               MBB->insert(MII, NewStore);
2426               VRM->addSpillSlotUse(SS, NewStore);
2427               InvalidateKills(MI, TRI, RegKills, KillOps);
2428               EraseInstr(&MI);
2429               Erased = true;
2430               --NextMII;
2431               --NextMII;  // backtrack to the unfolded instruction.
2432               BackTracked = true;
2433               isDead = true;
2434               ++NumSUnfold;
2435             }
2436           }
2437         }
2438 
2439         if (isDead) {  // Previous store is dead.
2440           // If we get here, the store is dead, nuke it now.
2441           DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
2442           InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
2443           EraseInstr(DeadStore);
2444           if (!NewStore)
2445             ++NumDSE;
2446         }
2447 
2448         MaybeDeadStores[SS] = NULL;
2449         if (NewStore) {
2450           // Treat this store as a spill merged into a copy. That makes the
2451           // stack slot value available.
2452           VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
2453           goto ProcessNextInst;
2454         }
2455       }
2456 
2457       // If the spill slot value is available, and this is a new definition of
2458       // the value, the value is not available anymore.
2459       if (MR & VirtRegMap::isMod) {
2460         // Notice that the value in this stack slot has been modified.
2461         Spills.ModifyStackSlotOrReMat(SS);
2462 
2463         // If this is *just* a mod of the value, check to see if this is just a
2464         // store to the spill slot (i.e. the spill got merged into the copy). If
2465         // so, realize that the vreg is available now, and add the store to the
2466         // MaybeDeadStore info.
2467         int StackSlot;
2468         if (!(MR & VirtRegMap::isRef)) {
2469           if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
2470             assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
2471                    "Src hasn't been allocated yet?");
2472 
            if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI)) {
              NextMII = llvm::next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the value
            // available in SrcReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }

    // Process all of the spilled defs.
    SpilledMIRegs.clear();
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

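      // Despite the name, this def operand may already be a physical
      // register; handle that case first and fall through to the spilled
      // virtual-register logic below otherwise.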
      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        // Also check whether it's copying from an "undef"; if so, we can't
        // eliminate it, or else the undef marker is lost and it will
        // confuse the scavenger. This is extremely rare.
        if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
            MI.getNumOperands() == 2) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
            // Last def is now dead.
            TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
          }
          EraseInstr(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

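      // From here on the def is a spilled virtual register: rewrite it to its
      // assigned physreg and, unless the def is dead, store it back to its
      // stack slot below.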
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM->getStackSlot(VirtReg);
      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to emit
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }

      // If StackSlot is available in a register that also holds other stack
      // slots, clobber those stack slots now.
      Spills.ClobberSharingStackSlots(StackSlot);

      assert(PhysReg && "VR not assigned a physical register?");
      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);

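      // Unless the def is dead, spill the new value back to its stack slot
      // (at most once per register per instruction, tracked by SpilledMIRegs).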
      if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
          LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        if (MI.isIdentityCopy()) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          EraseInstr(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
        }
      }
    }
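    // Every path that finished handling MI (erased it, unfolded it, or simply
    // rewrote its operands) joins here for per-instruction bookkeeping.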
    ProcessNextInst:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
      EraseInstr(&MI);
      Erased = true;
    }
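    // Record the instruction's position in the block; the distance map is
    // consulted elsewhere in the rewriter (e.g. when transferring deadness).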
    if (!Erased)
      DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }

}

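// Factory for the rewriter implementation selected by the -rewriter command
// line option (the local rewriter by default).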
llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}