//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
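// As an illustrative sketch only (instruction numbering and operands are
// hypothetical), given the code:
//
//   4   %reg1024<def> = MOV32ri 42
//   8   %reg1025<def> = ADD32rr %reg1024<kill>, %reg1026
//   12  ...           = use %reg1025<kill>
//
// this pass would compute the live interval [4, 8) for %reg1024 and the
// interval [8, 12) for %reg1025.
//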
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

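/// MultipleDefsBySameMI - Return true if the register defined at operand
/// MOIdx is also defined again by a later operand of the same instruction,
/// e.g. a multi-def like (operands illustrative):
///   %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...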
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index
/// is partially re-defining the specified live interval. A common case of
/// this is a definition of the sub-register.
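/// For example (operands illustrative):
///   %reg1047:1<def> = ...
/// redefines only sub-register 1 of a %reg1047 value that was fully defined
/// earlier.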
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    // FIXME: LiveIntervals shouldn't modify the code like this. Whoever
    // created the machine instruction should annotate it with <undef> flags
    // as needed. Then we can simply assert here. The REG_SEQUENCE lowering
    // is the main suspect.
    if (MO.getSubReg()) {
      mi->addRegisterDefined(interval.reg);
      // Mark all defs of interval.reg on this instruction as reading <undef>.
      for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
        MachineOperand &MO2 = mi->getOperand(i);
        if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
          MO2.setIsUndef();
      }
    }

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
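  // As an illustrative sketch (slot numbering hypothetical):
  //   4   %EAX<def> = MOV32ri 7
  //   8   ...       = use %EAX<kill>
  // gives %EAX the interval [defSlot(4), useSlot(8)+1).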
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, false,
                                                 tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only cases where we should reach here with a dead physreg and no
  // killing instruction are when the register is live-in to the function and
  // never used, or when the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual registers. For
/// some ordering of the machine instructions [1,N], a live interval is an
/// interval [i, j) where 1 <= i <= j < N and the variable is live between
/// instructions i and j.
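/// For example (illustrative numbering), a register defined at instruction 4
/// and last used at instruction 20 would get the live interval [4, 20).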
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
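///
/// A typical (hypothetical) use, after erasing an instruction that used the
/// register:
///   lis->RemoveMachineInstrFromMaps(MI);
///   MI->eraseFromParent();
///   lis->shrinkToUses(&lis->getInterval(Reg));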
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    }
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));

    // A use tied to an early-clobber def ends at the load slot and isn't
    // caught above. Catch it here instead. This probably only ever happens
    // for inline assembly.
    if (VNI->def.isUse())
      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx.getNextSlot())) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoAt(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}


//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call
  // instruction that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}

void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
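/// e.g. (illustrative) a PIC-base-relative constant-pool load such as
///   %reg1030<def> = MOV32rm %reg1029, 1, %noreg, <cp#0>, %noreg
/// would return %reg1029, the PIC base register the remat implicitly uses.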
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // With asserts disabled, stop at the first register operand; with asserts
    // enabled, keep scanning so the assert above can catch a second one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    if (SpillIs)
      for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
        if (ImpUse == (*SpillIs)[i]->reg)
          return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, 0, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
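/// e.g. (illustrative x86): for "%reg1024<def> = ADD32rr %reg1024, %reg1025"
/// with both %reg1024 operands in Ops, the use tied to the def is dropped,
/// only the def index is kept in FoldOps, and MRInfo becomes isModRef.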
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
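/// e.g. (illustrative x86): when %reg1024 lives in stack slot <fi#2>,
///   %reg1025<def> = ADD32rr %reg1026, %reg1024
/// may be folded to
///   %reg1025<def> = ADD32rm %reg1026, <fi#2>, 1, %noreg, 0
/// so no explicit reload of %reg1024 is needed.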
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval by to-be re-materialized operands of MI) with the new register.
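/// e.g. (illustrative): if an operand of MI is being remat'ed by
///   %reg1025<def> = LEA64r %reg1024, 1, %noreg, 0, %noreg
/// and li.reg (%reg1024) is renamed to NewVReg, the recorded remat
/// instruction's use of %reg1024 must be rewritten to NewVReg as well.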
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
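
/// anyKillInMBBAfterIdx - Return true if the live interval li is killed in
/// MBB at any point after the given index.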
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
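    // (For example, a hypothetical "%reg1024 = ADD %reg1024, %reg1024" would
    // contribute one def entry and two use entries, all at the same index.)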
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with a virtual register use. Prevent the
      // interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false, true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold the
        // spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

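/// alsoFoldARestore - Return true if a foldable restore is recorded for the
/// given virtual register at the given index in block Id.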
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

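/// eraseRestoreInfo - Invalidate the restore recorded for the given virtual
/// register at the given index in block Id, so no reload is issued for it.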
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def, so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

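/// getSpillWeight - Estimate the cost of spilling a def and/or use at the
/// given loop depth; the weight grows roughly exponentially with depth.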
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
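  // For reference, (1 + 100/(d+10))^d gives roughly 10.1 at d=1, 87 at d=2,
  // 657 at d=3, and 4.4e3 at d=4 -- close to 10^d at first, then damped.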
  // Note that powf() may not be available everywhere; for consistency we
  // use pow(double, double) here.
  float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);

  return (isDef + isUse) * lc;
}

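/// normalizeSpillWeights - Normalize the spill weights of the newly created
/// intervals by their sizes.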
static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    NewLIs[i]->weight =
      normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
}

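/// addIntervalsForSpills - Create new intervals for the defs / uses of the
/// given interval that must be spilled, record spill and restore points in
/// the VirtRegMap, and return the new intervals.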
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> *SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add
              // the register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
      dbgs() << "Trying to spill:";
      for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
        dbgs() << ' ' << tri_->getName(PRegs[i]);
      dbgs() << '\n';
    });

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}

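/// addLiveRangeToEndOfBlock - Given a register and an instruction that
/// defines it, create a live range running from that def to the end of its
/// basic block and add it to the register's interval.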
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}