//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than the cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs after register allocation, shortly before code
/// emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
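
// Additional schedulers become selectable via -misched=<name> by pairing a
// factory with a registry entry, mirroring DefaultSchedRegistry above. A
// minimal sketch; the names createMyCustomSched and "my-sched" are
// hypothetical and appear nowhere else in this file:
//
//   static ScheduleDAGInstrs *createMyCustomSched(MachineSchedContext *C) {
//     return createGenericSchedLive(C); // or any ScheduleDAGInstrs subclass
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "My custom scheduler.", createMyCustomSched);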

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
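
// For reference, a target supplies its default through the TargetPassConfig
// hook consulted above; returning nullptr keeps the GenericScheduler
// fallback. A minimal sketch, assuming a hypothetical MyPassConfig subclass:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
//   }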

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
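
// The TII half of this check is a target hook. For illustration only, a
// target could widen the notion of a boundary by overriding it (MyInstrInfo
// is a hypothetical TargetInstrInfo subclass):
//
//   bool MyInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
//                                          const MachineBasicBlock *MBB,
//                                          const MachineFunction &MF) const {
//     if (MI.isInlineAsm()) // e.g. never reorder across inline asm
//       return true;
//     return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
//   }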

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
          break;
        if (!I->isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
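
// Typical use from a DAG mutation is to order SUb after SUa with a weak
// cluster edge and let addEdge reject anything that would form a cycle. A
// minimal sketch, where SUa and SUb are two SUnits already in the DAG:
//
//   if (DAG->canAddEdge(SUb, SUa))
//     DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));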

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
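
// In builds with assertions, the cutoff above combines with the
// -misched-only-func/-misched-only-block filters for bisecting scheduler
// problems, e.g. (sketch of a typical invocation):
//   llc -misched-only-block=3 -misched-cutoff=10 foo.ll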

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and set up scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
initQueues(ArrayRef<SUnit * > TopRoots,ArrayRef<SUnit * > BotRoots)1263 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1264                                    ArrayRef<SUnit*> BotRoots) {
1265   ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1266   if (ShouldTrackPressure) {
1267     assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1268     TopRPTracker.setPos(CurrentTop);
1269   }
1270 }
1271 
1272 /// Move an instruction and update register pressure.
scheduleMI(SUnit * SU,bool IsTopNode)1273 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1274   // Move the instruction to its new location in the instruction stream.
1275   MachineInstr *MI = SU->getInstr();
1276 
1277   if (IsTopNode) {
1278     assert(SU->isTopReady() && "node still has unscheduled dependencies");
1279     if (&*CurrentTop == MI)
1280       CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
1281     else {
1282       moveInstruction(MI, CurrentTop);
1283       TopRPTracker.setPos(MI);
1284     }
1285 
1286     if (ShouldTrackPressure) {
1287       // Update top scheduled pressure.
1288       RegisterOperands RegOpers;
1289       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1290       if (ShouldTrackLaneMasks) {
1291         // Adjust liveness and add missing dead+read-undef flags.
1292         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1293         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1294       } else {
1295         // Adjust for missing dead-def flags.
1296         RegOpers.detectDeadDefs(*MI, *LIS);
1297       }
1298 
1299       TopRPTracker.advance(RegOpers);
1300       assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
1301       DEBUG(
1302         dbgs() << "Top Pressure:\n";
1303         dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1304       );
1305 
1306       updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
1307     }
1308   } else {
1309     assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1310     MachineBasicBlock::iterator priorII =
1311       priorNonDebug(CurrentBottom, CurrentTop);
1312     if (&*priorII == MI)
1313       CurrentBottom = priorII;
1314     else {
1315       if (&*CurrentTop == MI) {
1316         CurrentTop = nextIfDebug(++CurrentTop, priorII);
1317         TopRPTracker.setPos(CurrentTop);
1318       }
1319       moveInstruction(MI, CurrentBottom);
1320       CurrentBottom = MI;
1321     }
1322     if (ShouldTrackPressure) {
1323       RegisterOperands RegOpers;
1324       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1325       if (ShouldTrackLaneMasks) {
1326         // Adjust liveness and add missing dead+read-undef flags.
1327         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1328         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1329       } else {
1330         // Adjust for missing dead-def flags.
1331         RegOpers.detectDeadDefs(*MI, *LIS);
1332       }
1333 
1334       BotRPTracker.recedeSkipDebugValues();
1335       SmallVector<RegisterMaskPair, 8> LiveUses;
1336       BotRPTracker.recede(RegOpers, &LiveUses);
1337       assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
1338       DEBUG(
1339         dbgs() << "Bottom Pressure:\n";
1340         dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
1341       );
1342 
1343       updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
1344       updatePressureDiffs(LiveUses);
1345     }
1346   }
1347 }

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;
    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
        : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};
} // anonymous

void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
    SUnit *SU = MemOps[Idx];
    unsigned BaseReg;
    int64_t Offset;
    if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  std::sort(MemOpRecords.begin(), MemOpRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
                                 ClusterLength) &&
        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    } else
      ClusterLength = 1;
  }
}
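
// Illustrative sketch (not part of this pass): the sort-then-scan idiom used
// above, reduced to self-contained C++. All names below are hypothetical; the
// real code additionally consults the target via TII->shouldClusterMemOps()
// and records cluster edges in the DAG rather than a run length.
//
//   #include <algorithm>
//   #include <cstdint>
//   #include <tuple>
//   #include <vector>
//
//   struct Rec { unsigned Base; int64_t Off; };
//
//   unsigned longestSameBaseRun(std::vector<Rec> &Recs) {
//     if (Recs.empty())
//       return 0;
//     // Sort by (Base, Off) so ops on the same base register become
//     // adjacent, ordered by offset -- exactly what MemOpInfo::operator<
//     // achieves with std::tie.
//     std::sort(Recs.begin(), Recs.end(), [](const Rec &A, const Rec &B) {
//       return std::tie(A.Base, A.Off) < std::tie(B.Base, B.Off);
//     });
//     unsigned Len = 1, Best = 1;
//     for (size_t I = 0; I + 1 < Recs.size(); ++I) {
//       Len = (Recs[I].Base == Recs[I + 1].Base) ? Len + 1 : 1;
//       Best = std::max(Best, Len);
//     }
//     return Best;
//   }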

/// \brief Callback from DAG postProcessing to create cluster edges for loads
/// or stores.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if ((IsLoad && !SU->getInstr()->mayLoad()) ||
        (!IsLoad && !SU->getInstr()->mayStore()))
      continue;

    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }

  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
public:
  MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
    : TII(TII), TRI(TRI) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;
};
} // anonymous

/// Returns true if \p MI reads a register written by \p Other.
static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
                       const MachineInstr &Other) {
  for (const MachineOperand &MO : MI.uses()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Other.modifiesRegister(Reg, &TRI))
      return true;
  }
  return false;
}

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // For now, assume targets can only fuse with the branch.
  SUnit &ExitSU = DAG->ExitSU;
  MachineInstr *Branch = ExitSU.getInstr();
  if (!Branch)
    return;

  for (SUnit &SU : DAG->SUnits) {
    // SUnits with successors can't be scheduled in front of the ExitSU.
    if (!SU.Succs.empty())
      continue;
    // We only care if the node writes to a register that the branch reads.
    MachineInstr *Pred = SU.getInstr();
    if (!HasDataDep(TRI, *Branch, *Pred))
      continue;

    if (!TII.shouldScheduleAdjacent(*Pred, *Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  unsigned SrcReg = SrcOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  unsigned DstReg = DstOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from the source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with them.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we cannot make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");

  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
      *priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
  }
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//

static const unsigned InvalidCycle = ~0U;

SchedBoundary::~SchedBoundary() { delete HazardRec; }

void SchedBoundary::reset() {
  // A new HazardRec is created for each DAG and owned by SchedBoundary.
  // Destroying and reconstructing it is very expensive though. So keep
  // invalid, placeholder HazardRecs.
  if (HazardRec && HazardRec->isEnabled()) {
    delete HazardRec;
    HazardRec = nullptr;
  }
  Available.clear();
  Pending.clear();
  CheckPending = false;
  NextSUs.clear();
  CurrCycle = 0;
  CurrMOps = 0;
  MinReadyCycle = UINT_MAX;
  ExpectedLatency = 0;
  DependentLatency = 0;
  RetiredMOps = 0;
  MaxExecutedResCount = 0;
  ZoneCritResIdx = 0;
  IsResourceLimited = false;
  ReservedCycles.clear();
#ifndef NDEBUG
  // Track the maximum number of stall cycles that could arise either from the
  // latency of a DAG edge or the number of cycles that a processor resource is
  // reserved (SchedBoundary::ReservedCycles).
  MaxObservedStall = 0;
#endif
  // Reserve a zero-count for invalid CritResIdx.
  ExecutedResCounts.resize(1);
  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
}

void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
      * SchedModel->getMicroOpFactor();
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
}

void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel()) {
    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
    ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
  }
}

/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
/// (MicroOpBufferSize==0) will not make use of this since instructions are not
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
  if (!SU->isUnbuffered)
    return 0;

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  if (ReadyCycle > CurrCycle)
    return ReadyCycle - CurrCycle;
  return 0;
}

/// Compute the next cycle at which the given processor resource can be
/// scheduled.
unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
  unsigned NextUnreserved = ReservedCycles[PIdx];
  // If this resource has never been used, always return cycle zero.
  if (NextUnreserved == InvalidCycle)
    return 0;
  // For bottom-up scheduling add the cycles needed for the current operation.
  if (!isTop())
    NextUnreserved += Cycles;
  return NextUnreserved;
}
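
// Worked example (illustrative numbers): suppose ReservedCycles[PIdx] == 7
// because an earlier reserved use extends through cycle 7. Scheduling
// top-down, getNextResourceCycle(PIdx, 2) returns 7, the first cycle at which
// a new use may start. Scheduling bottom-up, it returns 7 + 2 == 9, since the
// current operation itself must occupy the resource for its 2 cycles before
// the previously recorded use. A never-used resource (InvalidCycle) yields 0.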

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled()
      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
    return true;
  }
  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
      if (NRCycle > CurrCycle) {
#ifndef NDEBUG
        MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
#endif
        DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
              << SchedModel->getResourceName(PI->ProcResourceIdx)
              << "=" << NRCycle << "c\n");
        return true;
      }
    }
  }
  return false;
}
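
// Worked example (illustrative numbers): with an issue width of 2, if one
// micro-op has already issued this cycle (CurrMOps == 1) and SU decodes to 2
// micro-ops, then CurrMOps + uops == 3 > 2 and checkHazard() returns true;
// the node waits in Pending until bumpCycle() starts a new instruction group.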

// Find the unscheduled node in ReadySUs with the highest latency.
unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
  SUnit *LateSU = nullptr;
  unsigned RemLatency = 0;
  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency) {
      RemLatency = L;
      LateSU = *I;
    }
  }
  if (LateSU) {
    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
          << LateSU->NodeNum << ") " << RemLatency << "c\n");
  }
  return RemLatency;
}

// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
  OtherCritIdx = 0;
  if (!SchedModel->hasInstrSchedModel())
    return 0;

  unsigned OtherCritCount = Rem->RemIssueCount
    + (RetiredMOps * SchedModel->getMicroOpFactor());
  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
    if (OtherCount > OtherCritCount) {
      OtherCritCount = OtherCount;
      OtherCritIdx = PIdx;
    }
  }
  if (OtherCritIdx) {
    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
          << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
  }
  return OtherCritCount;
}

void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
  assert(SU->getInstr() && "Scheduled SUnit must have instr");

#ifndef NDEBUG
  // ReadyCycle has been bumped up to the CurrCycle when this node was
  // scheduled, but CurrCycle may have been eagerly advanced immediately after
  // scheduling, so may now be greater than ReadyCycle.
  if (ReadyCycle > CurrCycle)
    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
#endif

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
      Available.size() >= ReadyListLimit)
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

void SchedBoundary::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->TopReadyCycle);
}

void SchedBoundary::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  releaseNode(SU, SU->BotReadyCycle);
}

/// Move the boundary of scheduled code by one cycle.
void SchedBoundary::bumpCycle(unsigned NextCycle) {
  if (SchedModel->getMicroOpBufferSize() == 0) {
    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
    if (MinReadyCycle > NextCycle)
      NextCycle = MinReadyCycle;
  }
  // Update the current micro-ops, which will issue in the next cycle.
  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;

  // Decrement DependentLatency based on the next cycle.
  if ((NextCycle - CurrCycle) > DependentLatency)
    DependentLatency = 0;
  else
    DependentLatency -= (NextCycle - CurrCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  unsigned LFactor = SchedModel->getLatencyFactor();
  IsResourceLimited =
    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
    > (int)LFactor;

  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
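
// Worked example (illustrative numbers): with IssueWidth == 4, bumping from
// CurrCycle to NextCycle == CurrCycle + 2 gives DecMOps == 4 * 2 == 8, so any
// CurrMOps value up to 8 drains to 0. The in-flight micro-op count therefore
// never survives more cycles than the issue width could have retired.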

void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
  ExecutedResCounts[PIdx] += Count;
  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
    MaxExecutedResCount = ExecutedResCounts[PIdx];
}

/// Add the given processor resource to this scheduled zone.
///
/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
/// during which this resource is consumed.
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  unsigned Count = Factor * Cycles;
  DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
        << " +" << Cycles << "x" << Factor << "u\n");

  // Update Executed resources counts.
  incExecutedResources(PIdx, Count);
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource. If so, it
  // becomes the critical resource.
  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
    ZoneCritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getResourceName(PIdx) << ": "
          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
  }
  // For reserved resources, record the highest cycle using the resource.
  unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
  if (NextAvailable > CurrCycle) {
    DEBUG(dbgs() << "  Resource conflict: "
          << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
          << NextAvailable << "\n");
  }
  return NextAvailable;
}
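
// Note on the scaled units above (a simplified reading of the machine model):
// getResourceFactor(PIdx) normalizes resources with different unit counts so
// their running counts are directly comparable, and dividing any scaled count
// by getLatencyFactor() converts it back to cycles, which is how the DEBUG
// output above prints the critical resource count with a "c" suffix.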

/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
      // For reserved resources, record the highest cycle using the resource.
      // For top-down scheduling, this is the cycle in which we schedule this
      // instruction plus the number of cycles the operation reserves the
      // resource. For bottom-up it is simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          }
          else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  } else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than can issue
  // in one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue.  If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    if (Available.size() >= ReadyListLimit)
      break;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
//  FIXME: Re-enable assert once PR20057 is resolved.
//    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
//           "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  DEBUG(Pending.dump());
  DEBUG(Available.dump());

  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone, given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max N.depth for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  }
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
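
// Illustrative sketch (not part of the scheduler): both helpers share one
// convention. A true return means "this heuristic decided the comparison":
// either TryCand wins and its Reason is set, or Cand wins and its Reason is
// tightened to the numerically smaller (higher-priority) Reason. A caller
// chains heuristics and stops at the first decisive one, e.g.:
//
//   if (tryLess(TryVal, CandVal, TryCand, Cand, Reason))
//     return;  // Decided; TryCand.Reason tells whether TryCand won.
//   // ...otherwise fall through to the next, lower-priority heuristic...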

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}

void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}

/// Initialize the per-region scheduling policy.
void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
                                  MachineBasicBlock::iterator End,
                                  unsigned NumRegionInstrs) {
  const MachineFunction &MF = *Begin->getParent()->getParent();
  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

  // Avoid setting up the register pressure tracker for small regions to save
  // compile time. As a rough heuristic, only track pressure when the number of
  // schedulable instructions exceeds half the integer register file.
  RegionPolicy.ShouldTrackPressure = true;
  for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
    MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
    if (TLI->isTypeLegal(LegalIntVT)) {
      unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
        TLI->getRegClassFor(LegalIntVT));
      RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
    }
  }

  // For generic targets, we default to bottom-up, because it's simpler and more
  // compile-time optimizations have been implemented in that direction.
  RegionPolicy.OnlyBottomUp = true;

  // Allow the subtarget to override the default policy.
  MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);

  // After subtarget overrides, apply command line options.
  if (!EnableRegPressure)
    RegionPolicy.ShouldTrackPressure = false;

  // Check whether -misched-topdown/bottomup force or unforce the scheduling
  // direction. e.g. -misched-bottomup=false allows scheduling in both
  // directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}

void GenericScheduler::dumpPolicy() {
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
}
2619 
2620 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2621 /// critical path by more cycles than it takes to drain the instruction buffer.
2622 /// We estimate an upper bounds on in-flight instructions as:
2623 ///
2624 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2625 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2626 /// InFlightResources = InFlightIterations * LoopResources
2627 ///
2628 /// TODO: Check execution resources in addition to IssueCount.
checkAcyclicLatency()2629 void GenericScheduler::checkAcyclicLatency() {
2630   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2631     return;
2632 
2633   // Scaled number of cycles per loop iteration.
2634   unsigned IterCount =
2635     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2636              Rem.RemIssueCount);
2637   // Scaled acyclic critical path.
2638   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2639   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2640   unsigned InFlightCount =
2641     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2642   unsigned BufferLimit =
2643     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2644 
2645   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2646 
2647   DEBUG(dbgs() << "IssueCycles="
2648         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2649         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2650         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2651         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2652         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2653         if (Rem.IsAcyclicLatencyLimited)
2654           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2655 }
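
// Worked example with hypothetical numbers (latency and micro-op factors of
// 1): CyclicCritPath = 4, RemIssueCount = 10 and CriticalPath = 20 give
//   IterCount     = max(4 * 1, 10)     = 10
//   AcyclicCount  = 20 * 1             = 20
//   InFlightCount = (20 * 10 + 9) / 10 = 20
// so any micro-op buffer smaller than 20 leaves the region marked as
// acyclic latency limited.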
2656 
2657 void GenericScheduler::registerRoots() {
2658   Rem.CriticalPath = DAG->ExitSU.getDepth();
2659 
2660   // Some roots may not feed into ExitSU. Check all of them just in case.
2661   for (std::vector<SUnit*>::const_iterator
2662          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2663     if ((*I)->getDepth() > Rem.CriticalPath)
2664       Rem.CriticalPath = (*I)->getDepth();
2665   }
2666   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2667   if (DumpCriticalPathLength) {
2668     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2669   }
2670 
2671   if (EnableCyclicPath) {
2672     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2673     checkAcyclicLatency();
2674   }
2675 }
2676 
2677 static bool tryPressure(const PressureChange &TryP,
2678                         const PressureChange &CandP,
2679                         GenericSchedulerBase::SchedCandidate &TryCand,
2680                         GenericSchedulerBase::SchedCandidate &Cand,
2681                         GenericSchedulerBase::CandReason Reason,
2682                         const TargetRegisterInfo *TRI,
2683                         const MachineFunction &MF) {
2684   // If one candidate decreases and the other increases, go with it.
2685   // Invalid candidates have UnitInc==0.
2686   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2687                  Reason)) {
2688     return true;
2689   }
2690   // Do not compare the magnitude of pressure changes between top and bottom
2691   // boundary.
2692   if (Cand.AtTop != TryCand.AtTop)
2693     return false;
2694 
2695   // If both candidates affect the same set in the same boundary, go with the
2696   // smallest increase.
2697   unsigned TryPSet = TryP.getPSetOrMax();
2698   unsigned CandPSet = CandP.getPSetOrMax();
2699   if (TryPSet == CandPSet) {
2700     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2701                    Reason);
2702   }
2703 
2704   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2705                                  std::numeric_limits<int>::max();
2706 
2707   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2708                                    std::numeric_limits<int>::max();
2709 
2710   // If the candidates are decreasing pressure, reverse priority.
2711   if (TryP.getUnitInc() < 0)
2712     std::swap(TryRank, CandRank);
2713   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2714 }
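
// Worked example (hypothetical pressure deltas): if TryP raises a pressure
// set by 1 and CandP raises the same set by 2, the equal-PSet branch above
// resolves via tryLess(1, 2, ...) in TryCand's favor. If the candidates
// touch different sets, the per-set scores from getRegPressureSetScore()
// decide instead, with priority reversed when the deltas are decreases.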
2715 
2716 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2717   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2718 }
2719 
2720 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2721 /// their physreg def/use.
2722 ///
2723 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2724 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2725 /// with the operation that produces or consumes the physreg. We'll do this when
2726 /// regalloc has support for parallel copies.
2727 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2728   const MachineInstr *MI = SU->getInstr();
2729   if (!MI->isCopy())
2730     return 0;
2731 
2732   unsigned ScheduledOper = isTop ? 1 : 0;
2733   unsigned UnscheduledOper = isTop ? 0 : 1;
2734   // If we have already scheduled the physreg producer/consumer, immediately
2735   // schedule the copy.
2736   if (TargetRegisterInfo::isPhysicalRegister(
2737         MI->getOperand(ScheduledOper).getReg()))
2738     return 1;
2739   // If the physreg is at the boundary, defer it. Otherwise schedule it
2740   // immediately to free the dependent. We can hoist the copy later.
2741   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2742   if (TargetRegisterInfo::isPhysicalRegister(
2743         MI->getOperand(UnscheduledOper).getReg()))
2744     return AtBoundary ? -1 : 1;
2745   return 0;
2746 }
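
// Example (bottom-up, isTop == false, hypothetical operands): picking
// "%EAX = COPY %vreg" finds the physreg on the already-scheduled side
// (operand 0) and returns 1, so the copy issues immediately next to its
// consumer. Picking "%vreg = COPY %EAX" finds the physreg on the
// unscheduled side: at the region boundary (no predecessors left) it
// returns -1 to defer the copy, otherwise 1.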
2747 
2748 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2749                                      bool AtTop,
2750                                      const RegPressureTracker &RPTracker,
2751                                      RegPressureTracker &TempTracker) {
2752   Cand.SU = SU;
2753   Cand.AtTop = AtTop;
2754   if (DAG->isTrackingPressure()) {
2755     if (AtTop) {
2756       TempTracker.getMaxDownwardPressureDelta(
2757         Cand.SU->getInstr(),
2758         Cand.RPDelta,
2759         DAG->getRegionCriticalPSets(),
2760         DAG->getRegPressure().MaxSetPressure);
2761     } else {
2762       if (VerifyScheduling) {
2763         TempTracker.getMaxUpwardPressureDelta(
2764           Cand.SU->getInstr(),
2765           &DAG->getPressureDiff(Cand.SU),
2766           Cand.RPDelta,
2767           DAG->getRegionCriticalPSets(),
2768           DAG->getRegPressure().MaxSetPressure);
2769       } else {
2770         RPTracker.getUpwardPressureDelta(
2771           Cand.SU->getInstr(),
2772           DAG->getPressureDiff(Cand.SU),
2773           Cand.RPDelta,
2774           DAG->getRegionCriticalPSets(),
2775           DAG->getRegPressure().MaxSetPressure);
2776       }
2777     }
2778   }
2779   DEBUG(if (Cand.RPDelta.Excess.isValid())
2780           dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
2781                  << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2782                  << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
2783 }
2784 
2785 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2786 /// hierarchical. This may be more efficient than a graduated cost model because
2787 /// we don't need to evaluate all aspects of the model for each node in the
2788 /// queue. But it's really done to make the heuristics easier to debug and
2789 /// statistically analyze.
2790 ///
2791 /// \param Cand provides the policy and current best candidate.
2792 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2793 /// \param Zone describes the scheduled zone that we are extending, or nullptr
2794 ///             if Cand is from a different zone than TryCand.
2795 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2796                                     SchedCandidate &TryCand,
2797                                     SchedBoundary *Zone) {
2798   // Initialize the candidate if needed.
2799   if (!Cand.isValid()) {
2800     TryCand.Reason = NodeOrder;
2801     return;
2802   }
2803 
2804   if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
2805                  biasPhysRegCopy(Cand.SU, Cand.AtTop),
2806                  TryCand, Cand, PhysRegCopy))
2807     return;
2808 
2809   // Avoid exceeding the target's limit.
2810   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2811                                                Cand.RPDelta.Excess,
2812                                                TryCand, Cand, RegExcess, TRI,
2813                                                DAG->MF))
2814     return;
2815 
2816   // Avoid increasing the max critical pressure in the scheduled region.
2817   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2818                                                Cand.RPDelta.CriticalMax,
2819                                                TryCand, Cand, RegCritical, TRI,
2820                                                DAG->MF))
2821     return;
2822 
2823   // We only compare a subset of features when comparing nodes between
2824   // Top and Bottom boundary. Some properties are simply incomparable, in many
2825   // other instances we should only override the other boundary if something
2826   // is a clear good pick on one boundary. Skip heuristics that are more
2827   // "tie-breaking" in nature.
2828   bool SameBoundary = Zone != nullptr;
2829   if (SameBoundary) {
2830     // For loops that are acyclic path limited, aggressively schedule for
2831     // latency.  This can result in very long dependence chains scheduled in
2832     // sequence, so once every cycle (when CurrMOps == 0), switch to normal
2833     // heuristics.
2834     if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
2835         tryLatency(TryCand, Cand, *Zone))
2836       return;
2837 
2838     // Prioritize instructions that read unbuffered resources by stall cycles.
2839     if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
2840                 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2841       return;
2842   }
2843 
2844   // Keep clustered nodes together to encourage downstream peephole
2845   // optimizations which may reduce resource requirements.
2846   //
2847   // This is a best effort to set things up for a post-RA pass. Optimizations
2848   // like generating loads of multiple registers should ideally be done within
2849   // the scheduler pass by combining the loads during DAG postprocessing.
2850   const SUnit *CandNextClusterSU =
2851     Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2852   const SUnit *TryCandNextClusterSU =
2853     TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2854   if (tryGreater(TryCand.SU == TryCandNextClusterSU,
2855                  Cand.SU == CandNextClusterSU,
2856                  TryCand, Cand, Cluster))
2857     return;
2858 
2859   if (SameBoundary) {
2860     // Weak edges are for clustering and other constraints.
2861     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
2862                 getWeakLeft(Cand.SU, Cand.AtTop),
2863                 TryCand, Cand, Weak))
2864       return;
2865   }
2866 
2867   // Avoid increasing the max pressure of the entire region.
2868   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2869                                                Cand.RPDelta.CurrentMax,
2870                                                TryCand, Cand, RegMax, TRI,
2871                                                DAG->MF))
2872     return;
2873 
2874   if (SameBoundary) {
2875     // Avoid critical resource consumption and balance the schedule.
2876     TryCand.initResourceDelta(DAG, SchedModel);
2877     if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2878                 TryCand, Cand, ResourceReduce))
2879       return;
2880     if (tryGreater(TryCand.ResDelta.DemandedResources,
2881                    Cand.ResDelta.DemandedResources,
2882                    TryCand, Cand, ResourceDemand))
2883       return;
2884 
2885     // Avoid serializing long latency dependence chains.
2886     // For acyclic path limited loops, latency was already checked above.
2887     if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
2888         !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
2889       return;
2890 
2891     // Prefer immediate defs/users of the last scheduled instruction. This is a
2892     // local pressure avoidance strategy that also makes the machine code
2893     // readable.
2894     if (tryGreater(Zone->isNextSU(TryCand.SU), Zone->isNextSU(Cand.SU),
2895                    TryCand, Cand, NextDefUse))
2896       return;
2897 
2898     // Fall through to original instruction order.
2899     if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2900         || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2901       TryCand.Reason = NodeOrder;
2902     }
2903   }
2904 }
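
// Example of the hierarchy (hypothetical tie-break): a TryCand that avoids
// a register-excess wins on RegExcess even if Cand has better latency;
// candidates tied on all pressure checks within one zone fall through to
// Stall, Cluster, Weak, resource balance, latency, and finally NodeOrder,
// and the first level that distinguishes them supplies the Reason.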
2905 
2906 /// Pick the best candidate from the queue.
2907 ///
2908 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2909 /// DAG building. To adjust for the current scheduling location we need to
2910 /// maintain the number of vreg uses remaining to be top-scheduled.
2911 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2912                                          const CandPolicy &ZonePolicy,
2913                                          const RegPressureTracker &RPTracker,
2914                                          SchedCandidate &Cand) {
2915   // getMaxPressureDelta temporarily modifies the tracker.
2916   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2917 
2918   ReadyQueue &Q = Zone.Available;
2919   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2920 
2921     SchedCandidate TryCand(ZonePolicy);
2922     initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
2923     // Pass SchedBoundary only when comparing nodes from the same boundary.
2924     SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
2925     tryCandidate(Cand, TryCand, ZoneArg);
2926     if (TryCand.Reason != NoCand) {
2927       // Initialize resource delta if needed in case future heuristics query it.
2928       if (TryCand.ResDelta == SchedResourceDelta())
2929         TryCand.initResourceDelta(DAG, SchedModel);
2930       Cand.setBest(TryCand);
2931       DEBUG(traceCandidate(Cand));
2932     }
2933   }
2934 }
2935 
2936 /// Pick the best candidate node from either the top or bottom queue.
2937 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2938   // Schedule as far as possible in the direction of no choice. This is most
2939   // efficient, but also provides the best heuristics for CriticalPSets.
2940   if (SUnit *SU = Bot.pickOnlyChoice()) {
2941     IsTopNode = false;
2942     tracePick(Only1, false);
2943     return SU;
2944   }
2945   if (SUnit *SU = Top.pickOnlyChoice()) {
2946     IsTopNode = true;
2947     tracePick(Only1, true);
2948     return SU;
2949   }
2950   // Set the bottom-up policy based on the state of the current bottom zone and
2951   // the instructions outside the zone, including the top zone.
2952   CandPolicy BotPolicy;
2953   setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
2954   // Set the top-down policy based on the state of the current top zone and
2955   // the instructions outside the zone, including the bottom zone.
2956   CandPolicy TopPolicy;
2957   setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
2958 
2959   // See if BotCand is still valid (because we previously scheduled from Top).
2960   DEBUG(dbgs() << "Picking from Bot:\n");
2961   if (!BotCand.isValid() || BotCand.SU->isScheduled ||
2962       BotCand.Policy != BotPolicy) {
2963     BotCand.reset(CandPolicy());
2964     pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
2965     assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2966   } else {
2967     DEBUG(traceCandidate(BotCand));
2968 #ifndef NDEBUG
2969     if (VerifyScheduling) {
2970       SchedCandidate TCand;
2971       TCand.reset(CandPolicy());
2972       pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
2973       assert(TCand.SU == BotCand.SU &&
2974              "Last pick result should correspond to re-picking right now");
2975     }
2976 #endif
2977   }
2978 
2979   // Check if the top Q has a better candidate.
2980   DEBUG(dbgs() << "Picking from Top:\n");
2981   if (!TopCand.isValid() || TopCand.SU->isScheduled ||
2982       TopCand.Policy != TopPolicy) {
2983     TopCand.reset(CandPolicy());
2984     pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
2985     assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2986   } else {
2987     DEBUG(traceCandidate(TopCand));
2988 #ifndef NDEBUG
2989     if (VerifyScheduling) {
2990       SchedCandidate TCand;
2991       TCand.reset(CandPolicy());
2992       pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
2993       assert(TCand.SU == TopCand.SU &&
2994            "Last pick result should correspond to re-picking right now");
2995     }
2996 #endif
2997   }
2998 
2999   // Pick best from BotCand and TopCand.
3000   assert(BotCand.isValid());
3001   assert(TopCand.isValid());
3002   SchedCandidate Cand = BotCand;
3003   TopCand.Reason = NoCand;
3004   tryCandidate(Cand, TopCand, nullptr);
3005   if (TopCand.Reason != NoCand) {
3006     Cand.setBest(TopCand);
3007     DEBUG(traceCandidate(Cand));
3008   }
3009 
3010   IsTopNode = Cand.AtTop;
3011   tracePick(Cand);
3012   return Cand.SU;
3013 }
3014 
3015 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3016 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3017   if (DAG->top() == DAG->bottom()) {
3018     assert(Top.Available.empty() && Top.Pending.empty() &&
3019            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3020     return nullptr;
3021   }
3022   SUnit *SU;
3023   do {
3024     if (RegionPolicy.OnlyTopDown) {
3025       SU = Top.pickOnlyChoice();
3026       if (!SU) {
3027         CandPolicy NoPolicy;
3028         TopCand.reset(NoPolicy);
3029         pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3030         assert(TopCand.Reason != NoCand && "failed to find a candidate");
3031         tracePick(TopCand);
3032         SU = TopCand.SU;
3033       }
3034       IsTopNode = true;
3035     } else if (RegionPolicy.OnlyBottomUp) {
3036       SU = Bot.pickOnlyChoice();
3037       if (!SU) {
3038         CandPolicy NoPolicy;
3039         BotCand.reset(NoPolicy);
3040         pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3041         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3042         tracePick(BotCand);
3043         SU = BotCand.SU;
3044       }
3045       IsTopNode = false;
3046     } else {
3047       SU = pickNodeBidirectional(IsTopNode);
3048     }
3049   } while (SU->isScheduled);
3050 
3051   if (SU->isTopReady())
3052     Top.removeReady(SU);
3053   if (SU->isBottomReady())
3054     Bot.removeReady(SU);
3055 
3056   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3057   return SU;
3058 }
3059 
3060 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3061 
3062   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3063   if (!isTop)
3064     ++InsertPos;
3065   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3066 
3067   // Find already scheduled copies with a single physreg dependence and move
3068   // them just above the scheduled instruction.
3069   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3070        I != E; ++I) {
3071     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3072       continue;
3073     SUnit *DepSU = I->getSUnit();
3074     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3075       continue;
3076     MachineInstr *Copy = DepSU->getInstr();
3077     if (!Copy->isCopy())
3078       continue;
3079     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3080           I->getSUnit()->dump(DAG));
3081     DAG->moveInstruction(Copy, InsertPos);
3082   }
3083 }
3084 
3085 /// Update the scheduler's state after scheduling a node. This is the same node
3086 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3087 /// update its state based on the current cycle before MachineSchedStrategy
3088 /// does.
3089 ///
3090 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3091 /// them here. See comments in biasPhysRegCopy.
3092 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3093   if (IsTopNode) {
3094     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3095     Top.bumpNode(SU);
3096     if (SU->hasPhysRegUses)
3097       reschedulePhysRegCopies(SU, true);
3098   } else {
3099     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3100     Bot.bumpNode(SU);
3101     if (SU->hasPhysRegDefs)
3102       reschedulePhysRegCopies(SU, false);
3103   }
3104 }
3105 
3106 /// Create the standard converging machine scheduler. This will be used as the
3107 /// default scheduler if the target does not set a default.
3108 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3109   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3110   // Register DAG post-processors.
3111   //
3112   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3113   // data and pass it to later mutations. Have a single mutation that gathers
3114   // the interesting nodes in one pass.
3115   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3116   if (EnableMemOpCluster) {
3117     if (DAG->TII->enableClusterLoads())
3118       DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3119     if (DAG->TII->enableClusterStores())
3120       DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
3121   }
3122   if (EnableMacroFusion)
3123     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3124   return DAG;
3125 }
3126 
3127 static MachineSchedRegistry
3128 GenericSchedRegistry("converge", "Standard converging scheduler.",
3129                      createGenericSchedLive);
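
// Illustrative sketch: an out-of-tree strategy hooks in the same way, by
// pairing a factory with a static MachineSchedRegistry entry, after which
// it is selectable with -misched=<name>. "myconverge" and
// createMySchedLive are hypothetical names:
//
//   static ScheduleDAGInstrs *createMySchedLive(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("myconverge", "Example scheduler registration.",
//                   createMySchedLive);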
3130 
3131 //===----------------------------------------------------------------------===//
3132 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3133 //===----------------------------------------------------------------------===//
3134 
3135 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3136   DAG = Dag;
3137   SchedModel = DAG->getSchedModel();
3138   TRI = DAG->TRI;
3139 
3140   Rem.init(DAG, SchedModel);
3141   Top.init(DAG, SchedModel, &Rem);
3142   BotRoots.clear();
3143 
3144   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3145   // or are disabled, then these HazardRecs will be disabled.
3146   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3147   if (!Top.HazardRec) {
3148     Top.HazardRec =
3149         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3150             Itin, DAG);
3151   }
3152 }
3153 
3154 
3155 void PostGenericScheduler::registerRoots() {
3156   Rem.CriticalPath = DAG->ExitSU.getDepth();
3157 
3158   // Some roots may not feed into ExitSU. Check all of them in case.
3159   for (SmallVectorImpl<SUnit*>::const_iterator
3160          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3161     if ((*I)->getDepth() > Rem.CriticalPath)
3162       Rem.CriticalPath = (*I)->getDepth();
3163   }
3164   DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
3165   if (DumpCriticalPathLength) {
3166     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3167   }
3168 }
3169 
3170 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3171 ///
3172 /// \param Cand provides the policy and current best candidate.
3173 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3174 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3175                                         SchedCandidate &TryCand) {
3176 
3177   // Initialize the candidate if needed.
3178   if (!Cand.isValid()) {
3179     TryCand.Reason = NodeOrder;
3180     return;
3181   }
3182 
3183   // Prioritize instructions that read unbuffered resources by stall cycles.
3184   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3185               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3186     return;
3187 
3188   // Avoid critical resource consumption and balance the schedule.
3189   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3190               TryCand, Cand, ResourceReduce))
3191     return;
3192   if (tryGreater(TryCand.ResDelta.DemandedResources,
3193                  Cand.ResDelta.DemandedResources,
3194                  TryCand, Cand, ResourceDemand))
3195     return;
3196 
3197   // Avoid serializing long latency dependence chains.
3198   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3199     return;
3200   }
3201 
3202   // Fall through to original instruction order.
3203   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3204     TryCand.Reason = NodeOrder;
3205 }
3206 
3207 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3208   ReadyQueue &Q = Top.Available;
3209   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3210     SchedCandidate TryCand(Cand.Policy);
3211     TryCand.SU = *I;
3212     TryCand.AtTop = true;
3213     TryCand.initResourceDelta(DAG, SchedModel);
3214     tryCandidate(Cand, TryCand);
3215     if (TryCand.Reason != NoCand) {
3216       Cand.setBest(TryCand);
3217       DEBUG(traceCandidate(Cand));
3218     }
3219   }
3220 }
3221 
3222 /// Pick the next node to schedule.
3223 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3224   if (DAG->top() == DAG->bottom()) {
3225     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3226     return nullptr;
3227   }
3228   SUnit *SU;
3229   do {
3230     SU = Top.pickOnlyChoice();
3231     if (SU) {
3232       tracePick(Only1, true);
3233     } else {
3234       CandPolicy NoPolicy;
3235       SchedCandidate TopCand(NoPolicy);
3236       // Set the top-down policy based on the state of the current top zone and
3237       // the instructions outside the zone, including the bottom zone.
3238       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3239       pickNodeFromQueue(TopCand);
3240       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3241       tracePick(TopCand);
3242       SU = TopCand.SU;
3243     }
3244   } while (SU->isScheduled);
3245 
3246   IsTopNode = true;
3247   Top.removeReady(SU);
3248 
3249   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3250   return SU;
3251 }
3252 
3253 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3254 /// scheduled/remaining flags in the DAG nodes.
3255 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3256   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3257   Top.bumpNode(SU);
3258 }
3259 
3260 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3261 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3262   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3263 }
3264 
3265 //===----------------------------------------------------------------------===//
3266 // ILP Scheduler. Currently for experimental analysis of heuristics.
3267 //===----------------------------------------------------------------------===//
3268 
3269 namespace {
3270 /// \brief Order nodes by the ILP metric.
3271 struct ILPOrder {
3272   const SchedDFSResult *DFSResult;
3273   const BitVector *ScheduledTrees;
3274   bool MaximizeILP;
3275 
3276   ILPOrder(bool MaxILP)
3277     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3278 
3279   /// \brief Apply a less-than relation on node priority.
3280   ///
3281   /// (Return true if A comes after B in the Q.)
3282   bool operator()(const SUnit *A, const SUnit *B) const {
3283     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3284     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3285     if (SchedTreeA != SchedTreeB) {
3286       // Unscheduled trees have lower priority.
3287       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3288         return ScheduledTrees->test(SchedTreeB);
3289 
3290       // Trees with shallower connections have lower priority.
3291       if (DFSResult->getSubtreeLevel(SchedTreeA)
3292           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3293         return DFSResult->getSubtreeLevel(SchedTreeA)
3294           < DFSResult->getSubtreeLevel(SchedTreeB);
3295       }
3296     }
3297     if (MaximizeILP)
3298       return DFSResult->getILP(A) < DFSResult->getILP(B);
3299     else
3300       return DFSResult->getILP(A) > DFSResult->getILP(B);
3301   }
3302 };
3303 
3304 /// \brief Schedule based on the ILP metric.
3305 class ILPScheduler : public MachineSchedStrategy {
3306   ScheduleDAGMILive *DAG;
3307   ILPOrder Cmp;
3308 
3309   std::vector<SUnit*> ReadyQ;
3310 public:
3311   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3312 
3313   void initialize(ScheduleDAGMI *dag) override {
3314     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3315     DAG = static_cast<ScheduleDAGMILive*>(dag);
3316     DAG->computeDFSResult();
3317     Cmp.DFSResult = DAG->getDFSResult();
3318     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3319     ReadyQ.clear();
3320   }
3321 
3322   void registerRoots() override {
3323     // Restore the heap in ReadyQ with the updated DFS results.
3324     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3325   }
3326 
3327   /// Implement MachineSchedStrategy interface.
3328   /// -----------------------------------------
3329 
3330   /// Callback to select the highest priority node from the ready Q.
3331   SUnit *pickNode(bool &IsTopNode) override {
3332     if (ReadyQ.empty()) return nullptr;
3333     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3334     SUnit *SU = ReadyQ.back();
3335     ReadyQ.pop_back();
3336     IsTopNode = false;
3337     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3338           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3339           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3340           << DAG->getDFSResult()->getSubtreeLevel(
3341             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3342           << "Scheduling " << *SU->getInstr());
3343     return SU;
3344   }
3345 
3346   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3347   void scheduleTree(unsigned SubtreeID) override {
3348     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3349   }
3350 
3351   /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
3352   /// DFSResults, and resort the priority Q.
3353   void schedNode(SUnit *SU, bool IsTopNode) override {
3354     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3355   }
3356 
3357   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3358 
3359   void releaseBottomNode(SUnit *SU) override {
3360     ReadyQ.push_back(SU);
3361     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3362   }
3363 };
3364 } // namespace
3365 
3366 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3367   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3368 }
3369 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3370   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3371 }
3372 static MachineSchedRegistry ILPMaxRegistry(
3373   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3374 static MachineSchedRegistry ILPMinRegistry(
3375   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
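
// Example usage (assuming an llc build; depending on the target's defaults
// the pass may also need to be enabled explicitly):
//   llc -enable-misched -misched=ilpmax input.ll
// selects ILPScheduler with MaximizeILP = true through the registry above.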
3376 
3377 //===----------------------------------------------------------------------===//
3378 // Machine Instruction Shuffler for Correctness Testing
3379 //===----------------------------------------------------------------------===//
3380 
3381 #ifndef NDEBUG
3382 namespace {
3383 /// Apply a less-than relation on the node order, which corresponds to the
3384 /// instruction order prior to scheduling. IsReverse implements greater-than.
3385 template<bool IsReverse>
3386 struct SUnitOrder {
3387   bool operator()(SUnit *A, SUnit *B) const {
3388     if (IsReverse)
3389       return A->NodeNum > B->NodeNum;
3390     else
3391       return A->NodeNum < B->NodeNum;
3392   }
3393 };
3394 
3395 /// Reorder instructions as much as possible.
3396 class InstructionShuffler : public MachineSchedStrategy {
3397   bool IsAlternating;
3398   bool IsTopDown;
3399 
3400   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3401   // gives nodes with a higher number higher priority causing the latest
3402   // instructions to be scheduled first.
3403   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3404     TopQ;
3405   // When scheduling bottom-up, use greater-than as the queue priority.
3406   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3407     BottomQ;
3408 public:
3409   InstructionShuffler(bool alternate, bool topdown)
3410     : IsAlternating(alternate), IsTopDown(topdown) {}
3411 
3412   void initialize(ScheduleDAGMI*) override {
3413     TopQ.clear();
3414     BottomQ.clear();
3415   }
3416 
3417   /// Implement MachineSchedStrategy interface.
3418   /// -----------------------------------------
3419 
3420   SUnit *pickNode(bool &IsTopNode) override {
3421     SUnit *SU;
3422     if (IsTopDown) {
3423       do {
3424         if (TopQ.empty()) return nullptr;
3425         SU = TopQ.top();
3426         TopQ.pop();
3427       } while (SU->isScheduled);
3428       IsTopNode = true;
3429     } else {
3430       do {
3431         if (BottomQ.empty()) return nullptr;
3432         SU = BottomQ.top();
3433         BottomQ.pop();
3434       } while (SU->isScheduled);
3435       IsTopNode = false;
3436     }
3437     if (IsAlternating)
3438       IsTopDown = !IsTopDown;
3439     return SU;
3440   }
3441 
3442   void schedNode(SUnit *SU, bool IsTopNode) override {}
3443 
3444   void releaseTopNode(SUnit *SU) override {
3445     TopQ.push(SU);
3446   }
3447   void releaseBottomNode(SUnit *SU) override {
3448     BottomQ.push(SU);
3449   }
3450 };
3451 } // namespace
3452 
3453 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3454   bool Alternate = !ForceTopDown && !ForceBottomUp;
3455   bool TopDown = !ForceBottomUp;
3456   assert((TopDown || !ForceTopDown) &&
3457          "-misched-topdown incompatible with -misched-bottomup");
3458   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3459 }
3460 static MachineSchedRegistry ShufflerRegistry(
3461   "shuffle", "Shuffle machine instructions alternating directions",
3462   createInstructionShuffler);
3463 #endif // !NDEBUG
3464 
3465 //===----------------------------------------------------------------------===//
3466 // GraphWriter support for ScheduleDAGMILive.
3467 //===----------------------------------------------------------------------===//
3468 
3469 #ifndef NDEBUG
3470 namespace llvm {
3471 
3472 template<> struct GraphTraits<
3473   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3474 
3475 template<>
3476 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3477 
3478   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3479 
3480   static std::string getGraphName(const ScheduleDAG *G) {
3481     return G->MF.getName();
3482   }
3483 
3484   static bool renderGraphFromBottomUp() {
3485     return true;
3486   }
3487 
3488   static bool isNodeHidden(const SUnit *Node) {
3489     if (ViewMISchedCutoff == 0)
3490       return false;
3491     return (Node->Preds.size() > ViewMISchedCutoff
3492          || Node->Succs.size() > ViewMISchedCutoff);
3493   }
3494 
3495   /// If you want to override the dot attributes printed for a particular
3496   /// edge, override this method.
3497   static std::string getEdgeAttributes(const SUnit *Node,
3498                                        SUnitIterator EI,
3499                                        const ScheduleDAG *Graph) {
3500     if (EI.isArtificialDep())
3501       return "color=cyan,style=dashed";
3502     if (EI.isCtrlDep())
3503       return "color=blue,style=dashed";
3504     return "";
3505   }
3506 
3507   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3508     std::string Str;
3509     raw_string_ostream SS(Str);
3510     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3511     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3512       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3513     SS << "SU:" << SU->NodeNum;
3514     if (DFS)
3515       SS << " I:" << DFS->getNumInstrs(SU);
3516     return SS.str();
3517   }
3518   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3519     return G->getGraphNodeLabel(SU);
3520   }
3521 
3522   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3523     std::string Str("shape=Mrecord");
3524     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3525     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3526       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3527     if (DFS) {
3528       Str += ",style=filled,fillcolor=\"#";
3529       Str += DOT::getColorString(DFS->getSubtreeID(N));
3530       Str += '"';
3531     }
3532     return Str;
3533   }
3534 };
3535 } // namespace llvm
3536 #endif // NDEBUG
3537 
3538 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3539 /// rendered using 'dot'.
3540 ///
3541 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3542 #ifndef NDEBUG
3543   ViewGraph(this, Name, false, Title);
3544 #else
3545   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3546          << "systems with Graphviz or gv!\n";
3547 #endif  // NDEBUG
3548 }
3549 
3550 /// Out-of-line implementation with no arguments is handy for gdb.
3551 void ScheduleDAGMI::viewGraph() {
3552   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3553 }
3554