//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));
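
// These flags are handy when reducing a scheduling problem from the command
// line. A hypothetical llc invocation (the input file and function name are
// illustrative; the debug-only flags require a +Asserts build):
//
//   llc foo.ll -misched-only-func=foo -misched-cutoff=16 \
//       -view-misched-dags -debug-only=misched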

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
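
// Any scheduler can be made selectable with -misched=<name> by pairing a
// factory with a registry entry. A minimal sketch (the names here are
// hypothetical; the pattern matches DefaultSchedRegistry above):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MySchedStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Illustrative custom scheduler",
//                   createMySched);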

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return const_cast<MachineInstr*>(
    &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
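
// The middle step above is the hook a target uses to substitute its own
// scheduler: override TargetPassConfig::createMachineScheduler. A sketch,
// assuming a hypothetical TargetPassConfig subclass and strategy:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
//   }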

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipOptnoneFunction(*mf.getFunction()))
    return false;

  const TargetSubtargetInfo &ST =
    mf.getTarget().getSubtarget<TargetSubtargetInfo>();
  if (!ST.enablePostMachineScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII,
                            bool IsPostRA) {
  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  bool IsPostRA = Scheduler.isPostRA();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
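    //
    // For illustration (hypothetical instructions), a block such as:
    //
    //   %a = ...
    //   %b = use %a
    //   CALL @f                 <- boundary; excluded from any DAG
    //   %c = ...
    //   %d = use %c
    //   JMP %exit               <- boundary (terminator)
    //
    // is visited bottom-up as region 1 = [%c, JMP) and then
    // region 2 = [%a, CALL); the boundary instructions are counted but never
    // reordered.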
    unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
        if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
            << "MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler.finishBlock();
    if (Scheduler.isPostRA()) {
      // FIXME: Ideally, no further passes should rely on kill flags. However,
      // thumb2 size reduction is currently an exception.
      Scheduler.fixupKills(MBB);
    }
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
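
// DAG mutations use canAddEdge/addEdge to add constraints without creating
// cycles. A typical call pattern (sketch; see the mutations later in this
// file for real uses):
//
//   if (DAG->canAddEdge(SuccSU, PredSU))
//     DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial));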

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<unsigned, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID] << " > " << Limit << "(+ "
            << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
  for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
    /// FIXME: Currently assuming single-use physregs.
    unsigned Reg = LiveUses[LUIdx];
    DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
    if (!TRI->isVirtualRegister(Reg))
      continue;

    // This may be called before CurrentBottom has been initialized. However,
    // BotRPTracker must have a valid position. We want the value live into the
    // instruction or live out of the block, so ask for the previous
    // instruction's live-out.
    const LiveInterval &LI = LIS->getInterval(Reg);
    VNInfo *VNI;
    MachineBasicBlock::const_iterator I =
      nextIfDebug(BotRPTracker.getPos(), BB->end());
    if (I == BB->end())
      VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    else {
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(I));
      VNI = LRQ.valueIn();
    }
    // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
    assert(VNI && "No live value at use.");
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      SUnit *SU = UI->SU;
      DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
            << *SU->getInstr());
      // If this use comes before the reaching def, it cannot be a last use, so
      // decrease its pressure change.
      if (!SU->isScheduled && SU != &ExitSU) {
        LiveQueryResult LRQ
          = LI.Query(LIS->getInstructionIndex(SU->getInstr()));
        if (LRQ.valueIn() == VNI)
          getPressureDiff(SU).addPressureChange(Reg, true, &MRI);
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ =
        LI.Query(LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    if (ShouldTrackPressure) {
      // Update bottom scheduled pressure.
      SmallVector<unsigned, 8> LiveUses;
      BotRPTracker.recede(&LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const LoadInfo &RHS) const {
      return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  void apply(ScheduleDAGMI *DAG) override;
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous
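
// This mutation is not installed by targets directly; the generic scheduler
// registers it when clustering is enabled. A sketch of the registration,
// gated by the -misched-cluster flag defined above:
//
//   if (EnableLoadCluster && DAG->TII->enableClusterLoads())
//     DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));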
1246 
clusterNeighboringLoads(ArrayRef<SUnit * > Loads,ScheduleDAGMI * DAG)1247 void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
1248                                                   ScheduleDAGMI *DAG) {
1249   SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
1250   for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
1251     SUnit *SU = Loads[Idx];
1252     unsigned BaseReg;
1253     unsigned Offset;
1254     if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1255       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
1256   }
1257   if (LoadRecords.size() < 2)
1258     return;
1259   std::sort(LoadRecords.begin(), LoadRecords.end());
1260   unsigned ClusterLength = 1;
1261   for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
1262     if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
1263       ClusterLength = 1;
1264       continue;
1265     }
1266 
1267     SUnit *SUa = LoadRecords[Idx].SU;
1268     SUnit *SUb = LoadRecords[Idx+1].SU;
1269     if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1270         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1271 
1272       DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
1273             << SUb->NodeNum << ")\n");
1274       // Copy successor edges from SUa to SUb. Interleaving computation
1275       // dependent on SUa can prevent load combining due to register reuse.
1276       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1277       // loads should have effectively the same inputs.
1278       for (SUnit::const_succ_iterator
1279              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1280         if (SI->getSUnit() == SUb)
1281           continue;
1282         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1283         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1284       }
1285       ++ClusterLength;
1286     }
1287     else
1288       ClusterLength = 1;
1289   }
1290 }
1291 
1292 /// \brief Callback from DAG postProcessing to create cluster edges for loads.
apply(ScheduleDAGMI * DAG)1293 void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
1294   // Map DAG NodeNum to store chain ID.
1295   DenseMap<unsigned, unsigned> StoreChainIDs;
1296   // Map each store chain to a set of dependent loads.
1297   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1298   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1299     SUnit *SU = &DAG->SUnits[Idx];
1300     if (!SU->getInstr()->mayLoad())
1301       continue;
1302     unsigned ChainPredID = DAG->SUnits.size();
1303     for (SUnit::const_pred_iterator
1304            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1305       if (PI->isCtrl()) {
1306         ChainPredID = PI->getSUnit()->NodeNum;
1307         break;
1308       }
1309     }
1310     // Check if this chain-like pred has been seen
1311     // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
1312     unsigned NumChains = StoreChainDependents.size();
1313     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1314       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1315     if (Result.second)
1316       StoreChainDependents.resize(NumChains + 1);
1317     StoreChainDependents[Result.first->second].push_back(SU);
1318   }
1319   // Iterate over the store chains.
1320   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1321     clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
1322 }
1323 
1324 //===----------------------------------------------------------------------===//
1325 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1326 //===----------------------------------------------------------------------===//
1327 
1328 namespace {
1329 /// \brief Post-process the DAG to create cluster edges between instructions
1330 /// that may be fused by the processor into a single operation.
1331 class MacroFusion : public ScheduleDAGMutation {
1332   const TargetInstrInfo *TII;
1333 public:
MacroFusion(const TargetInstrInfo * tii)1334   MacroFusion(const TargetInstrInfo *tii): TII(tii) {}
1335 
1336   void apply(ScheduleDAGMI *DAG) override;
1337 };
1338 } // anonymous
1339 
1340 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1341 /// fused operations.
1342 void MacroFusion::apply(ScheduleDAGMI *DAG) {
1343   // For now, assume targets can only fuse with the branch.
1344   MachineInstr *Branch = DAG->ExitSU.getInstr();
1345   if (!Branch)
1346     return;
1347 
1348   for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
1349     SUnit *SU = &DAG->SUnits[--Idx];
1350     if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
1351       continue;
1352 
1353     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1354     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1355     // need to copy predecessor edges from ExitSU to SU, since top-down
1356     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1357     // of SU, we could create an artificial edge from the deepest root, but it
1358     // hasn't been needed yet.
1359     bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
1360     (void)Success;
1361     assert(Success && "No DAG nodes should be reachable from ExitSU");
1362 
1363     DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
1364     break;
1365   }
1366 }
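// Example of the intended effect, in illustrative pseudo-MIR and assuming an
// x86-like target where shouldScheduleAdjacent() fuses a flags-producing
// compare with the branch that consumes it:
//
//   SU(5):  CMP32rr %vreg0, %vreg1, %EFLAGS<imp-def>  // weak Cluster edge
//   ExitSU: JNE_1 <BB#2>, %EFLAGS<imp-use>            // the region's branch
//
// When the bottom-up scheduler pops the branch, the Cluster edge makes SU(5)
// the preferred next pick, so the pair stays adjacent and can issue as one
// macro-op.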
1367 
1368 //===----------------------------------------------------------------------===//
1369 // CopyConstrain - DAG post-processing to encourage copy elimination.
1370 //===----------------------------------------------------------------------===//
1371 
1372 namespace {
1373 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1374 /// the one use that defines the copy's source vreg, most likely an induction
1375 /// variable increment.
1376 class CopyConstrain : public ScheduleDAGMutation {
1377   // Transient state.
1378   SlotIndex RegionBeginIdx;
1379   // RegionEndIdx is the slot index of the last non-debug instruction in the
1380   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1381   SlotIndex RegionEndIdx;
1382 public:
1383   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1384 
1385   void apply(ScheduleDAGMI *DAG) override;
1386 
1387 protected:
1388   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1389 };
1390 } // anonymous
1391 
1392 /// constrainLocalCopy handles two possibilities:
1393 /// 1) Local src:
1394 /// I0:     = dst
1395 /// I1: src = ...
1396 /// I2:     = dst
1397 /// I3: dst = src (copy)
1398 /// (create pred->succ edges I0->I1, I2->I1)
1399 ///
1400 /// 2) Local copy:
1401 /// I0: dst = src (copy)
1402 /// I1:     = dst
1403 /// I2: src = ...
1404 /// I3:     = dst
1405 /// (create pred->succ edges I1->I2, I3->I2)
1406 ///
1407 /// Although the MachineScheduler is currently constrained to single blocks,
1408 /// this algorithm should handle extended blocks. An EBB is a set of
1409 /// contiguously numbered blocks such that the previous block in the EBB is
1410 /// always the single predecessor.
1411 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1412   LiveIntervals *LIS = DAG->getLIS();
1413   MachineInstr *Copy = CopySU->getInstr();
1414 
1415   // Check for pure vreg copies.
1416   unsigned SrcReg = Copy->getOperand(1).getReg();
1417   if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
1418     return;
1419 
1420   unsigned DstReg = Copy->getOperand(0).getReg();
1421   if (!TargetRegisterInfo::isVirtualRegister(DstReg))
1422     return;
1423 
1424   // Check if either the dest or source is local. If it's live across a back
1425   // edge, it's not local. Note that if both vregs are live across the back
1426   // edge, we cannot successfully constrain the copy without cyclic scheduling.
1427   unsigned LocalReg = DstReg;
1428   unsigned GlobalReg = SrcReg;
1429   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1430   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1431     LocalReg = SrcReg;
1432     GlobalReg = DstReg;
1433     LocalLI = &LIS->getInterval(LocalReg);
1434     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1435       return;
1436   }
1437   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1438 
1439   // Find the global segment after the start of the local LI.
1440   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1441   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1442   // local live range. We could create edges from other global uses to the local
1443   // start, but the coalescer should have already eliminated these cases, so
1444   // don't bother dealing with it.
1445   if (GlobalSegment == GlobalLI->end())
1446     return;
1447 
1448   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1449   // returned the next global segment. But if GlobalSegment overlaps with
1450   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1451   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1452   if (GlobalSegment->contains(LocalLI->beginIndex()))
1453     ++GlobalSegment;
1454 
1455   if (GlobalSegment == GlobalLI->end())
1456     return;
1457 
1458   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1459   if (GlobalSegment != GlobalLI->begin()) {
1460     // Two address defs have no hole.
1461     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1462                                GlobalSegment->start)) {
1463       return;
1464     }
1465     // If the prior global segment may be defined by the same two-address
1466     // instruction that also defines LocalLI, then we can't make a hole here.
1467     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1468                                LocalLI->beginIndex())) {
1469       return;
1470     }
1471     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1472     // it would be a disconnected component in the live range.
1473     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1474            "Disconnected LRG within the scheduling region.");
1475   }
1476   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1477   if (!GlobalDef)
1478     return;
1479 
1480   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1481   if (!GlobalSU)
1482     return;
1483 
1484   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1485   // constraining the uses of the last local def to precede GlobalDef.
1486   SmallVector<SUnit*,8> LocalUses;
1487   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1488   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1489   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1490   for (SUnit::const_succ_iterator
1491          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1492        I != E; ++I) {
1493     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1494       continue;
1495     if (I->getSUnit() == GlobalSU)
1496       continue;
1497     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1498       return;
1499     LocalUses.push_back(I->getSUnit());
1500   }
1501   // Open the top of the GlobalLI hole by constraining any earlier global uses
1502   // to precede the start of LocalLI.
1503   SmallVector<SUnit*,8> GlobalUses;
1504   MachineInstr *FirstLocalDef =
1505     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1506   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1507   for (SUnit::const_pred_iterator
1508          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1509     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1510       continue;
1511     if (I->getSUnit() == FirstLocalSU)
1512       continue;
1513     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1514       return;
1515     GlobalUses.push_back(I->getSUnit());
1516   }
1517   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1518   // Add the weak edges.
1519   for (SmallVectorImpl<SUnit*>::const_iterator
1520          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1521     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1522           << GlobalSU->NodeNum << ")\n");
1523     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1524   }
1525   for (SmallVectorImpl<SUnit*>::const_iterator
1526          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1527     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1528           << FirstLocalSU->NodeNum << ")\n");
1529     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1530   }
1531 }
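// Worked example of case 1) from the header comment, with hypothetical slot
// indexes and dst live across the region (the "global" vreg):
//
//   16r  I0:     = dst        LI(dst): [0B,48r) [64r,...)  <- hole after I2
//   32r  I1: src = ...        LI(src): [32r,64r)           <- local
//   48r  I2:     = dst
//   64r  I3: dst = src (copy)
//
// GlobalSegment ends up at [64r,...), so GlobalDef/GlobalSU is the copy
// itself; LocalUses stays empty (only the copy reads src), while I0 and I2
// reach GlobalUses through their anti-dependence edges on dst. The two weak
// edges I0->I1 and I2->I1 push the remaining uses of dst above the def of
// src, keeping src's live range short enough for the coalescer to remove
// the copy.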
1532 
1533 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1534 /// copy elimination.
1535 void CopyConstrain::apply(ScheduleDAGMI *DAG) {
1536   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1537 
1538   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1539   if (FirstPos == DAG->end())
1540     return;
1541   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
1542   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1543     &*priorNonDebug(DAG->end(), DAG->begin()));
1544 
1545   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1546     SUnit *SU = &DAG->SUnits[Idx];
1547     if (!SU->getInstr()->isCopy())
1548       continue;
1549 
1550     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1551   }
1552 }
1553 
1554 //===----------------------------------------------------------------------===//
1555 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1556 // and possibly other custom schedulers.
1557 //===----------------------------------------------------------------------===//
1558 
1559 static const unsigned InvalidCycle = ~0U;
1560 
1561 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1562 
1563 void SchedBoundary::reset() {
1564   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1565   // Destroying and reconstructing it is very expensive though. So keep
1566   // invalid, placeholder HazardRecs.
1567   if (HazardRec && HazardRec->isEnabled()) {
1568     delete HazardRec;
1569     HazardRec = nullptr;
1570   }
1571   Available.clear();
1572   Pending.clear();
1573   CheckPending = false;
1574   NextSUs.clear();
1575   CurrCycle = 0;
1576   CurrMOps = 0;
1577   MinReadyCycle = UINT_MAX;
1578   ExpectedLatency = 0;
1579   DependentLatency = 0;
1580   RetiredMOps = 0;
1581   MaxExecutedResCount = 0;
1582   ZoneCritResIdx = 0;
1583   IsResourceLimited = false;
1584   ReservedCycles.clear();
1585 #ifndef NDEBUG
1586   // Track the maximum number of stall cycles that could arise either from the
1587   // latency of a DAG edge or the number of cycles that a processor resource is
1588   // reserved (SchedBoundary::ReservedCycles).
1589   MaxObservedStall = 0;
1590 #endif
1591   // Reserve a zero-count for invalid CritResIdx.
1592   ExecutedResCounts.resize(1);
1593   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1594 }
1595 
1596 void SchedRemainder::
1597 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1598   reset();
1599   if (!SchedModel->hasInstrSchedModel())
1600     return;
1601   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1602   for (std::vector<SUnit>::iterator
1603          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1604     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1605     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1606       * SchedModel->getMicroOpFactor();
1607     for (TargetSchedModel::ProcResIter
1608            PI = SchedModel->getWriteProcResBegin(SC),
1609            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1610       unsigned PIdx = PI->ProcResourceIdx;
1611       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1612       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1613     }
1614   }
1615 }
1616 
1617 void SchedBoundary::
1618 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1619   reset();
1620   DAG = dag;
1621   SchedModel = smodel;
1622   Rem = rem;
1623   if (SchedModel->hasInstrSchedModel()) {
1624     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1625     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1626   }
1627 }
1628 
1629 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1630 /// these "soft stalls" differently than the hard stall cycles based on CPU
1631 /// resources and computed by checkHazard(). A fully in-order model
1632 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1633 /// available for scheduling until they are ready. However, a weaker in-order
1634 /// model may use this for heuristics. For example, if a processor has in-order
1635 /// behavior when reading certain resources, this may come into play.
1636 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1637   if (!SU->isUnbuffered)
1638     return 0;
1639 
1640   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1641   if (ReadyCycle > CurrCycle)
1642     return ReadyCycle - CurrCycle;
1643   return 0;
1644 }
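// For example, assuming a bottom-up zone at CurrCycle 5 and an unbuffered SU
// fed by a long-latency divide:
//
//   SU->isUnbuffered  == true;
//   SU->BotReadyCycle == 8;
//   getLatencyStallCycles(SU);   // 8 - 5 == 3 soft stall cycles
//
// A buffered SU returns 0 regardless of its ready cycle, since the buffer is
// assumed to hide the latency.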
1645 
1646 /// Compute the next cycle at which the given processor resource can be
1647 /// scheduled.
1648 unsigned SchedBoundary::
1649 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1650   unsigned NextUnreserved = ReservedCycles[PIdx];
1651   // If this resource has never been used, always return cycle zero.
1652   if (NextUnreserved == InvalidCycle)
1653     return 0;
1654   // For bottom-up scheduling add the cycles needed for the current operation.
1655   if (!isTop())
1656     NextUnreserved += Cycles;
1657   return NextUnreserved;
1658 }
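// For example, with a hypothetical unpipelined divider whose last reserved
// use is recorded at ReservedCycles[Div] == 12, an instruction needing it
// for 4 cycles gets:
//
//   getNextResourceCycle(Div, 4);  // top-down:  12
//   getNextResourceCycle(Div, 4);  // bottom-up: 12 + 4 == 16, adding this
//                                  // op's own occupancy of the divider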
1659 
1660 /// Does this SU have a hazard within the current instruction group.
1661 ///
1662 /// The scheduler supports two modes of hazard recognition. The first is the
1663 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1664 /// supports highly complicated in-order reservation tables
1665 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1666 ///
1667 /// The second is a streamlined mechanism that checks for hazards based on
1668 /// simple counters that the scheduler itself maintains. It explicitly checks
1669 /// for instruction dispatch limitations, including the number of micro-ops that
1670 /// can dispatch per cycle.
1671 ///
1672 /// TODO: Also check whether the SU must start a new group.
1673 bool SchedBoundary::checkHazard(SUnit *SU) {
1674   if (HazardRec->isEnabled()
1675       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1676     return true;
1677   }
1678   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1679   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1680     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1681           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1682     return true;
1683   }
1684   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1685     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1686     for (TargetSchedModel::ProcResIter
1687            PI = SchedModel->getWriteProcResBegin(SC),
1688            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1689       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1690       if (NRCycle > CurrCycle) {
1691 #ifndef NDEBUG
1692         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1693 #endif
1694         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1695               << SchedModel->getResourceName(PI->ProcResourceIdx)
1696               << "=" << NRCycle << "c\n");
1697         return true;
1698       }
1699     }
1700   }
1701   return false;
1702 }
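// For example, the micro-op check alone, on a hypothetical 4-wide machine:
//
//   unsigned IssueWidth = 4, CurrMOps = 3, uops = 2;
//   bool Hazard = (CurrMOps > 0) && (CurrMOps + uops > IssueWidth);  // true
//
// Such an SU stays in (or moves back to) Pending until bumpCycle() starts a
// new instruction group and resets CurrMOps.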
1703 
1704 // Find the unscheduled node in ReadySUs with the highest latency.
1705 unsigned SchedBoundary::
1706 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1707   SUnit *LateSU = nullptr;
1708   unsigned RemLatency = 0;
1709   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1710        I != E; ++I) {
1711     unsigned L = getUnscheduledLatency(*I);
1712     if (L > RemLatency) {
1713       RemLatency = L;
1714       LateSU = *I;
1715     }
1716   }
1717   if (LateSU) {
1718     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1719           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1720   }
1721   return RemLatency;
1722 }
1723 
1724 // Count resources in this zone and the remaining unscheduled
1725 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1726 // resource index, or zero if the zone is issue limited.
1727 unsigned SchedBoundary::
1728 getOtherResourceCount(unsigned &OtherCritIdx) {
1729   OtherCritIdx = 0;
1730   if (!SchedModel->hasInstrSchedModel())
1731     return 0;
1732 
1733   unsigned OtherCritCount = Rem->RemIssueCount
1734     + (RetiredMOps * SchedModel->getMicroOpFactor());
1735   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1736         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1737   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1738        PIdx != PEnd; ++PIdx) {
1739     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1740     if (OtherCount > OtherCritCount) {
1741       OtherCritCount = OtherCount;
1742       OtherCritIdx = PIdx;
1743     }
1744   }
1745   if (OtherCritIdx) {
1746     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1747           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1748           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1749   }
1750   return OtherCritCount;
1751 }
1752 
1753 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1754   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1755 
1756 #ifndef NDEBUG
1757   // ReadyCycle was bumped up to CurrCycle when this node was
1758   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1759   // scheduling, so may now be greater than ReadyCycle.
1760   if (ReadyCycle > CurrCycle)
1761     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1762 #endif
1763 
1764   if (ReadyCycle < MinReadyCycle)
1765     MinReadyCycle = ReadyCycle;
1766 
1767   // Check for interlocks first. For the purpose of other heuristics, an
1768   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1769   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1770   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1771     Pending.push(SU);
1772   else
1773     Available.push(SU);
1774 
1775   // Record this node as an immediate dependent of the scheduled node.
1776   NextSUs.insert(SU);
1777 }
1778 
1779 void SchedBoundary::releaseTopNode(SUnit *SU) {
1780   if (SU->isScheduled)
1781     return;
1782 
1783   releaseNode(SU, SU->TopReadyCycle);
1784 }
1785 
1786 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1787   if (SU->isScheduled)
1788     return;
1789 
1790   releaseNode(SU, SU->BotReadyCycle);
1791 }
1792 
1793 /// Move the boundary of scheduled code by one cycle.
1794 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1795   if (SchedModel->getMicroOpBufferSize() == 0) {
1796     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1797     if (MinReadyCycle > NextCycle)
1798       NextCycle = MinReadyCycle;
1799   }
1800   // Update the current micro-ops, which will issue in the next cycle.
1801   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1802   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1803 
1804   // Decrement DependentLatency based on the next cycle.
1805   if ((NextCycle - CurrCycle) > DependentLatency)
1806     DependentLatency = 0;
1807   else
1808     DependentLatency -= (NextCycle - CurrCycle);
1809 
1810   if (!HazardRec->isEnabled()) {
1811     // Bypass HazardRec virtual calls.
1812     CurrCycle = NextCycle;
1813   }
1814   else {
1815     // Bypass getHazardType calls in case of long latency.
1816     for (; CurrCycle != NextCycle; ++CurrCycle) {
1817       if (isTop())
1818         HazardRec->AdvanceCycle();
1819       else
1820         HazardRec->RecedeCycle();
1821     }
1822   }
1823   CheckPending = true;
1824   unsigned LFactor = SchedModel->getLatencyFactor();
1825   IsResourceLimited =
1826     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1827     > (int)LFactor;
1828 
1829   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1830 }
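// For example, advancing two cycles on a hypothetical 4-wide machine retires
// up to 8 in-flight micro-ops:
//
//   unsigned IssueWidth = 4, CurrCycle = 10, NextCycle = 12, CurrMOps = 5;
//   unsigned DecMOps = IssueWidth * (NextCycle - CurrCycle);     // 8
//   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;   // 0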
1831 
1832 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
1833   ExecutedResCounts[PIdx] += Count;
1834   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1835     MaxExecutedResCount = ExecutedResCounts[PIdx];
1836 }
1837 
1838 /// Add the given processor resource to this scheduled zone.
1839 ///
1840 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1841 /// during which this resource is consumed.
1842 ///
1843 /// \return the next cycle at which the instruction may execute without
1844 /// oversubscribing resources.
1845 unsigned SchedBoundary::
1846 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
1847   unsigned Factor = SchedModel->getResourceFactor(PIdx);
1848   unsigned Count = Factor * Cycles;
1849   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
1850         << " +" << Cycles << "x" << Factor << "u\n");
1851 
1852   // Update Executed resources counts.
1853   incExecutedResources(PIdx, Count);
1854   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1855   Rem->RemainingCounts[PIdx] -= Count;
1856 
1857   // Check if this resource exceeds the current critical resource. If so, it
1858   // becomes the critical resource.
1859   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
1860     ZoneCritResIdx = PIdx;
1861     DEBUG(dbgs() << "  *** Critical resource "
1862           << SchedModel->getResourceName(PIdx) << ": "
1863           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
1864   }
1865   // For reserved resources, record the highest cycle using the resource.
1866   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
1867   if (NextAvailable > CurrCycle) {
1868     DEBUG(dbgs() << "  Resource conflict: "
1869           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
1870           << NextAvailable << "\n");
1871   }
1872   return NextAvailable;
1873 }
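// The counts are kept in scaled units so differently-sized resources compare
// directly. For example, on a hypothetical model with IssueWidth = 4, a
// 2-unit FP resource and a 1-unit divider, the common scale (an LCM of the
// unit counts and issue width) is 4, so:
//
//   countResource(FP,  1, NextCycle);  // adds 1 * (4/2) == 2 scaled units
//   countResource(Div, 1, NextCycle);  // adds 1 * (4/1) == 4 units == 1 cycle
//
// Dividing a scaled count by getLatencyFactor() (here 4) converts it back to
// cycles, which is how the DEBUG output above prints its "c" values.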
1874 
1875 /// Move the boundary of scheduled code by one SUnit.
1876 void SchedBoundary::bumpNode(SUnit *SU) {
1877   // Update the reservation table.
1878   if (HazardRec->isEnabled()) {
1879     if (!isTop() && SU->isCall) {
1880       // Calls are scheduled with their preceding instructions. For bottom-up
1881       // scheduling, clear the pipeline state before emitting.
1882       HazardRec->Reset();
1883     }
1884     HazardRec->EmitInstruction(SU);
1885   }
1886   // checkHazard should prevent scheduling multiple instructions per cycle that
1887   // exceed the issue width.
1888   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1889   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
1890   assert(
1891       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
1892       "Cannot schedule this instruction's MicroOps in the current cycle.");
1893 
1894   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1895   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
1896 
1897   unsigned NextCycle = CurrCycle;
1898   switch (SchedModel->getMicroOpBufferSize()) {
1899   case 0:
1900     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
1901     break;
1902   case 1:
1903     if (ReadyCycle > NextCycle) {
1904       NextCycle = ReadyCycle;
1905       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
1906     }
1907     break;
1908   default:
1909     // We don't currently model the OOO reorder buffer, so consider all
1910     // scheduled MOps to be "retired". We do loosely model in-order resource
1911     // latency. If this instruction uses an in-order resource, account for any
1912     // likely stall cycles.
1913     if (SU->isUnbuffered && ReadyCycle > NextCycle)
1914       NextCycle = ReadyCycle;
1915     break;
1916   }
1917   RetiredMOps += IncMOps;
1918 
1919   // Update resource counts and critical resource.
1920   if (SchedModel->hasInstrSchedModel()) {
1921     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
1922     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
1923     Rem->RemIssueCount -= DecRemIssue;
1924     if (ZoneCritResIdx) {
1925       // Scale scheduled micro-ops for comparing with the critical resource.
1926       unsigned ScaledMOps =
1927         RetiredMOps * SchedModel->getMicroOpFactor();
1928 
1929       // If scaled micro-ops are now more than the previous critical resource by
1930       // a full cycle, then micro-ops issue becomes critical.
1931       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
1932           >= (int)SchedModel->getLatencyFactor()) {
1933         ZoneCritResIdx = 0;
1934         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
1935               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
1936       }
1937     }
1938     for (TargetSchedModel::ProcResIter
1939            PI = SchedModel->getWriteProcResBegin(SC),
1940            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1941       unsigned RCycle =
1942         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
1943       if (RCycle > NextCycle)
1944         NextCycle = RCycle;
1945     }
1946     if (SU->hasReservedResource) {
1947       // For reserved resources, record the highest cycle using the resource.
1948       // For top-down scheduling, this is the cycle in which we schedule this
1949     // instruction plus the number of cycles the operation reserves the
1950     // resource. For bottom-up, it is simply the instruction's cycle.
1951       for (TargetSchedModel::ProcResIter
1952              PI = SchedModel->getWriteProcResBegin(SC),
1953              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1954         unsigned PIdx = PI->ProcResourceIdx;
1955         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
1956           if (isTop()) {
1957             ReservedCycles[PIdx] =
1958               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
1959           }
1960           else
1961             ReservedCycles[PIdx] = NextCycle;
1962         }
1963       }
1964     }
1965   }
1966   // Update ExpectedLatency and DependentLatency.
1967   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
1968   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
1969   if (SU->getDepth() > TopLatency) {
1970     TopLatency = SU->getDepth();
1971     DEBUG(dbgs() << "  " << Available.getName()
1972           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
1973   }
1974   if (SU->getHeight() > BotLatency) {
1975     BotLatency = SU->getHeight();
1976     DEBUG(dbgs() << "  " << Available.getName()
1977           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
1978   }
1979   // If we stall for any reason, bump the cycle.
1980   if (NextCycle > CurrCycle) {
1981     bumpCycle(NextCycle);
1982   }
1983   else {
1984     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
1985     // resource limited. If a stall occurred, bumpCycle does this.
1986     unsigned LFactor = SchedModel->getLatencyFactor();
1987     IsResourceLimited =
1988       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1989       > (int)LFactor;
1990   }
1991   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
1992   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
1993   // one cycle.  Since we commonly reach the max MOps here, opportunistically
1994   // bump the cycle to avoid uselessly checking everything in the readyQ.
1995   CurrMOps += IncMOps;
1996   while (CurrMOps >= SchedModel->getIssueWidth()) {
1997     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
1998           << " at cycle " << CurrCycle << '\n');
1999     bumpCycle(++NextCycle);
2000   }
2001   DEBUG(dumpScheduledState());
2002 }
2003 
2004 /// Release pending ready nodes into the available queue. This makes them
2005 /// visible to heuristics.
2006 void SchedBoundary::releasePending() {
2007   // If the available queue is empty, it is safe to reset MinReadyCycle.
2008   if (Available.empty())
2009     MinReadyCycle = UINT_MAX;
2010 
2011   // Check to see if any of the pending instructions are ready to issue.  If
2012   // so, add them to the available queue.
2013   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2014   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2015     SUnit *SU = *(Pending.begin()+i);
2016     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2017 
2018     if (ReadyCycle < MinReadyCycle)
2019       MinReadyCycle = ReadyCycle;
2020 
2021     if (!IsBuffered && ReadyCycle > CurrCycle)
2022       continue;
2023 
2024     if (checkHazard(SU))
2025       continue;
2026 
2027     Available.push(SU);
2028     Pending.remove(Pending.begin()+i);
2029     --i; --e;
2030   }
2031   DEBUG(if (!Pending.empty()) Pending.dump());
2032   CheckPending = false;
2033 }
2034 
2035 /// Remove SU from the ready set for this boundary.
2036 void SchedBoundary::removeReady(SUnit *SU) {
2037   if (Available.isInQueue(SU))
2038     Available.remove(Available.find(SU));
2039   else {
2040     assert(Pending.isInQueue(SU) && "bad ready count");
2041     Pending.remove(Pending.find(SU));
2042   }
2043 }
2044 
2045 /// If this queue only has one ready candidate, return it. As a side effect,
2046 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2047 /// one node is ready. If multiple instructions are ready, return nullptr.
2048 SUnit *SchedBoundary::pickOnlyChoice() {
2049   if (CheckPending)
2050     releasePending();
2051 
2052   if (CurrMOps > 0) {
2053     // Defer any ready instrs that now have a hazard.
2054     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2055       if (checkHazard(*I)) {
2056         Pending.push(*I);
2057         I = Available.remove(I);
2058         continue;
2059       }
2060       ++I;
2061     }
2062   }
2063   for (unsigned i = 0; Available.empty(); ++i) {
2064 //  FIXME: Re-enable assert once PR20057 is resolved.
2065 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2066 //           "permanent hazard");
2067     (void)i;
2068     bumpCycle(CurrCycle + 1);
2069     releasePending();
2070   }
2071   if (Available.size() == 1)
2072     return *Available.begin();
2073   return nullptr;
2074 }
2075 
2076 #ifndef NDEBUG
2077 // This is useful information to dump after bumpNode.
2078 // Note that the Queue contents are more useful before pickNodeFromQueue.
2079 void SchedBoundary::dumpScheduledState() {
2080   unsigned ResFactor;
2081   unsigned ResCount;
2082   if (ZoneCritResIdx) {
2083     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2084     ResCount = getResourceCount(ZoneCritResIdx);
2085   }
2086   else {
2087     ResFactor = SchedModel->getMicroOpFactor();
2088     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2089   }
2090   unsigned LFactor = SchedModel->getLatencyFactor();
2091   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2092          << "  Retired: " << RetiredMOps;
2093   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2094   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2095          << ResCount / ResFactor << " "
2096          << SchedModel->getResourceName(ZoneCritResIdx)
2097          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2098          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2099          << " limited.\n";
2100 }
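// A typical dump, with hypothetical values, looks like:
//
//   BotQ.A @12c
//     Retired: 40
//     Executed: 11c
//     Critical: 10c, 20 FPDiv
//     ExpectedLatency: 9c
//     - Resource limited.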
2101 #endif
2102 
2103 //===----------------------------------------------------------------------===//
2104 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2105 //===----------------------------------------------------------------------===//
2106 
2107 void GenericSchedulerBase::SchedCandidate::
2108 initResourceDelta(const ScheduleDAGMI *DAG,
2109                   const TargetSchedModel *SchedModel) {
2110   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2111     return;
2112 
2113   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2114   for (TargetSchedModel::ProcResIter
2115          PI = SchedModel->getWriteProcResBegin(SC),
2116          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2117     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2118       ResDelta.CritResources += PI->Cycles;
2119     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2120       ResDelta.DemandedResources += PI->Cycles;
2121   }
2122 }
2123 
2124 /// Set the CandPolicy for a scheduling zone given the current resources and
2125 /// latencies inside and outside the zone.
2126 void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
2127                                      bool IsPostRA,
2128                                      SchedBoundary &CurrZone,
2129                                      SchedBoundary *OtherZone) {
2130   // Apply preemptive heuristics based on the total latency and resources
2131   // inside and outside this zone. Potential stalls should be considered before
2132   // following this policy.
2133 
2134   // Compute remaining latency. We need this both to determine whether the
2135   // overall schedule has become latency-limited and whether the instructions
2136   // outside this zone are resource or latency limited.
2137   //
2138   // The "dependent" latency is updated incrementally during scheduling as the
2139   // max height/depth of scheduled nodes minus the cycles since it was
2140   // scheduled:
2141   //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
2142   //
2143   // The "independent" latency is the max ready queue depth:
2144   //   ILat = max N.depth for N in Available|Pending
2145   //
2146   // RemainingLatency is the greater of independent and dependent latency.
2147   unsigned RemLatency = CurrZone.getDependentLatency();
2148   RemLatency = std::max(RemLatency,
2149                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2150   RemLatency = std::max(RemLatency,
2151                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2152 
2153   // Compute the critical resource outside the zone.
2154   unsigned OtherCritIdx = 0;
2155   unsigned OtherCount =
2156     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2157 
2158   bool OtherResLimited = false;
2159   if (SchedModel->hasInstrSchedModel()) {
2160     unsigned LFactor = SchedModel->getLatencyFactor();
2161     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2162   }
2163   // Schedule aggressively for latency in PostRA mode. We don't check for
2164   // acyclic latency during PostRA, and highly out-of-order processors will
2165   // skip PostRA scheduling.
2166   if (!OtherResLimited) {
2167     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2168       Policy.ReduceLatency |= true;
2169       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2170             << " RemainingLatency " << RemLatency << " + "
2171             << CurrZone.getCurrCycle() << "c > CritPath "
2172             << Rem.CriticalPath << "\n");
2173     }
2174   }
2175   // If the same resource is limiting inside and outside the zone, do nothing.
2176   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2177     return;
2178 
2179   DEBUG(
2180     if (CurrZone.isResourceLimited()) {
2181       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2182              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2183              << "\n";
2184     }
2185     if (OtherResLimited)
2186       dbgs() << "  RemainingLimit: "
2187              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2188     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2189       dbgs() << "  Latency limited both directions.\n");
2190 
2191   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2192     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2193 
2194   if (OtherResLimited)
2195     Policy.DemandResIdx = OtherCritIdx;
2196 }
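// The resource-limited test above compares scaled units against one full
// cycle. For example, with hypothetical numbers:
//
//   int LFactor = 2;                  // SchedModel->getLatencyFactor()
//   int OtherCount = 26, RemLatency = 10;
//   bool OtherResLimited =
//     (OtherCount - RemLatency * LFactor) > LFactor;   // 26 - 20 == 6 > 2
//
// The other zone is resource limited, so DemandResIdx steers picks toward
// instructions that consume its critical resource early.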
2197 
2198 #ifndef NDEBUG
2199 const char *GenericSchedulerBase::getReasonStr(
2200   GenericSchedulerBase::CandReason Reason) {
2201   switch (Reason) {
2202   case NoCand:         return "NOCAND    ";
2203   case PhysRegCopy:    return "PREG-COPY ";
2204   case RegExcess:      return "REG-EXCESS";
2205   case RegCritical:    return "REG-CRIT  ";
2206   case Stall:          return "STALL     ";
2207   case Cluster:        return "CLUSTER   ";
2208   case Weak:           return "WEAK      ";
2209   case RegMax:         return "REG-MAX   ";
2210   case ResourceReduce: return "RES-REDUCE";
2211   case ResourceDemand: return "RES-DEMAND";
2212   case TopDepthReduce: return "TOP-DEPTH ";
2213   case TopPathReduce:  return "TOP-PATH  ";
2214   case BotHeightReduce:return "BOT-HEIGHT";
2215   case BotPathReduce:  return "BOT-PATH  ";
2216   case NextDefUse:     return "DEF-USE   ";
2217   case NodeOrder:      return "ORDER     ";
2218   };
2219   llvm_unreachable("Unknown reason!");
2220 }
2221 
2222 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2223   PressureChange P;
2224   unsigned ResIdx = 0;
2225   unsigned Latency = 0;
2226   switch (Cand.Reason) {
2227   default:
2228     break;
2229   case RegExcess:
2230     P = Cand.RPDelta.Excess;
2231     break;
2232   case RegCritical:
2233     P = Cand.RPDelta.CriticalMax;
2234     break;
2235   case RegMax:
2236     P = Cand.RPDelta.CurrentMax;
2237     break;
2238   case ResourceReduce:
2239     ResIdx = Cand.Policy.ReduceResIdx;
2240     break;
2241   case ResourceDemand:
2242     ResIdx = Cand.Policy.DemandResIdx;
2243     break;
2244   case TopDepthReduce:
2245     Latency = Cand.SU->getDepth();
2246     break;
2247   case TopPathReduce:
2248     Latency = Cand.SU->getHeight();
2249     break;
2250   case BotHeightReduce:
2251     Latency = Cand.SU->getHeight();
2252     break;
2253   case BotPathReduce:
2254     Latency = Cand.SU->getDepth();
2255     break;
2256   }
2257   dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2258   if (P.isValid())
2259     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2260            << ":" << P.getUnitInc() << " ";
2261   else
2262     dbgs() << "      ";
2263   if (ResIdx)
2264     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2265   else
2266     dbgs() << "         ";
2267   if (Latency)
2268     dbgs() << " " << Latency << " cycles ";
2269   else
2270     dbgs() << "          ";
2271   dbgs() << '\n';
2272 }
2273 #endif
2274 
2275 /// Return true if this heuristic determines order.
2276 static bool tryLess(int TryVal, int CandVal,
2277                     GenericSchedulerBase::SchedCandidate &TryCand,
2278                     GenericSchedulerBase::SchedCandidate &Cand,
2279                     GenericSchedulerBase::CandReason Reason) {
2280   if (TryVal < CandVal) {
2281     TryCand.Reason = Reason;
2282     return true;
2283   }
2284   if (TryVal > CandVal) {
2285     if (Cand.Reason > Reason)
2286       Cand.Reason = Reason;
2287     return true;
2288   }
2289   Cand.setRepeat(Reason);
2290   return false;
2291 }
2292 
2293 static bool tryGreater(int TryVal, int CandVal,
2294                        GenericSchedulerBase::SchedCandidate &TryCand,
2295                        GenericSchedulerBase::SchedCandidate &Cand,
2296                        GenericSchedulerBase::CandReason Reason) {
2297   if (TryVal > CandVal) {
2298     TryCand.Reason = Reason;
2299     return true;
2300   }
2301   if (TryVal < CandVal) {
2302     if (Cand.Reason > Reason)
2303       Cand.Reason = Reason;
2304     return true;
2305   }
2306   Cand.setRepeat(Reason);
2307   return false;
2308 }
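// Both helpers implement a three-way protocol: a strict win marks TryCand
// with Reason and returns true; a strict loss records the higher-priority
// Reason on Cand (so the trace reports the deciding heuristic) and also
// returns true; a tie returns false and falls through. A typical call site
// looks like:
//
//   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
//               getWeakLeft(Cand.SU, Zone.isTop()),
//               TryCand, Cand, GenericSchedulerBase::Weak))
//     return;   // decided one way or the other on weak edges
//   // equal counts: fall through to the next-priority heuristic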
2309 
2310 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2311                        GenericSchedulerBase::SchedCandidate &Cand,
2312                        SchedBoundary &Zone) {
2313   if (Zone.isTop()) {
2314     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2315       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2316                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2317         return true;
2318     }
2319     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2320                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2321       return true;
2322   }
2323   else {
2324     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2325       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2326                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2327         return true;
2328     }
2329     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2330                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2331       return true;
2332   }
2333   return false;
2334 }
2335 
2336 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2337                       bool IsTop) {
2338   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2339         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2340 }
2341 
2342 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2343   assert(dag->hasVRegLiveness() &&
2344          "(PreRA)GenericScheduler needs vreg liveness");
2345   DAG = static_cast<ScheduleDAGMILive*>(dag);
2346   SchedModel = DAG->getSchedModel();
2347   TRI = DAG->TRI;
2348 
2349   Rem.init(DAG, SchedModel);
2350   Top.init(DAG, SchedModel, &Rem);
2351   Bot.init(DAG, SchedModel, &Rem);
2352 
2353   // Initialize resource counts.
2354 
2355   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2356   // are disabled, then these HazardRecs will be disabled.
2357   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2358   const TargetMachine &TM = DAG->MF.getTarget();
2359   if (!Top.HazardRec) {
2360     Top.HazardRec =
2361       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
2362   }
2363   if (!Bot.HazardRec) {
2364     Bot.HazardRec =
2365       TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
2366   }
2367 }
2368 
2369 /// Initialize the per-region scheduling policy.
2370 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2371                                   MachineBasicBlock::iterator End,
2372                                   unsigned NumRegionInstrs) {
2373   const TargetMachine &TM = Context->MF->getTarget();
2374   const TargetLowering *TLI = TM.getTargetLowering();
2375 
2376   // Avoid setting up the register pressure tracker for small regions to save
2377   // compile time. As a rough heuristic, only track pressure when the number of
2378   // schedulable instructions exceeds half the integer register file.
2379   RegionPolicy.ShouldTrackPressure = true;
2380   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2381     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2382     if (TLI->isTypeLegal(LegalIntVT)) {
2383       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2384         TLI->getRegClassFor(LegalIntVT));
2385       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2386     }
2387   }
2388 
2389   // For generic targets, we default to bottom-up, because it's simpler and more
2390   // compile-time optimizations have been implemented in that direction.
2391   RegionPolicy.OnlyBottomUp = true;
2392 
2393   // Allow the subtarget to override default policy.
2394   const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
2395   ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
2396 
2397   // After subtarget overrides, apply command line options.
2398   if (!EnableRegPressure)
2399     RegionPolicy.ShouldTrackPressure = false;
2400 
2401   // The -misched-topdown/bottomup flags can force or unforce the direction.
2402   // e.g. -misched-bottomup=false allows scheduling in both directions.
2403   assert((!ForceTopDown || !ForceBottomUp) &&
2404          "-misched-topdown incompatible with -misched-bottomup");
2405   if (ForceBottomUp.getNumOccurrences() > 0) {
2406     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2407     if (RegionPolicy.OnlyBottomUp)
2408       RegionPolicy.OnlyTopDown = false;
2409   }
2410   if (ForceTopDown.getNumOccurrences() > 0) {
2411     RegionPolicy.OnlyTopDown = ForceTopDown;
2412     if (RegionPolicy.OnlyTopDown)
2413       RegionPolicy.OnlyBottomUp = false;
2414   }
2415 }
2416 
2417 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2418 /// critical path by more cycles than it takes to drain the instruction buffer.
2419 /// We estimate an upper bound on in-flight instructions as:
2420 ///
2421 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2422 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2423 /// InFlightResources = InFlightIterations * LoopResources
2424 ///
2425 /// TODO: Check execution resources in addition to IssueCount.
2426 void GenericScheduler::checkAcyclicLatency() {
2427   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2428     return;
2429 
2430   // Scaled number of cycles per loop iteration.
2431   unsigned IterCount =
2432     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2433              Rem.RemIssueCount);
2434   // Scaled acyclic critical path.
2435   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2436   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2437   unsigned InFlightCount =
2438     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2439   unsigned BufferLimit =
2440     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2441 
2442   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2443 
2444   DEBUG(dbgs() << "IssueCycles="
2445         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2446         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2447         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2448         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2449         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2450         if (Rem.IsAcyclicLatencyLimited)
2451           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2452 }
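// Worked example with hypothetical numbers (LatencyFactor and MicroOpFactor
// both 1 for simplicity): CyclicCritPath = 4, CriticalPath = 24,
// RemIssueCount = 8 micro-ops, MicroOpBufferSize = 16:
//
//   IterCount     = max(4 * 1, 8)      == 8
//   AcyclicCount  = 24 * 1             == 24
//   InFlightCount = (24 * 8 + 7) / 8   == 24
//   BufferLimit   = 16 * 1             == 16
//
// InFlightCount (24) exceeds BufferLimit (16), so the loop is flagged
// acyclic-latency limited and tryCandidate() schedules aggressively for
// latency to overlap iterations.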
2453 
2454 void GenericScheduler::registerRoots() {
2455   Rem.CriticalPath = DAG->ExitSU.getDepth();
2456 
2457   // Some roots may not feed into ExitSU. Check all of them in case.
2458   for (std::vector<SUnit*>::const_iterator
2459          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2460     if ((*I)->getDepth() > Rem.CriticalPath)
2461       Rem.CriticalPath = (*I)->getDepth();
2462   }
2463   DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
2464 
2465   if (EnableCyclicPath) {
2466     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2467     checkAcyclicLatency();
2468   }
2469 }
2470 
2471 static bool tryPressure(const PressureChange &TryP,
2472                         const PressureChange &CandP,
2473                         GenericSchedulerBase::SchedCandidate &TryCand,
2474                         GenericSchedulerBase::SchedCandidate &Cand,
2475                         GenericSchedulerBase::CandReason Reason) {
2476   int TryRank = TryP.getPSetOrMax();
2477   int CandRank = CandP.getPSetOrMax();
2478   // If both candidates affect the same set, go with the smallest increase.
2479   if (TryRank == CandRank) {
2480     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2481                    Reason);
2482   }
2483   // If one candidate decreases and the other increases, go with it.
2484   // Invalid candidates have UnitInc==0.
2485   if (tryLess(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2486               Reason)) {
2487     return true;
2488   }
2489   // If the candidates are decreasing pressure, reverse priority.
2490   if (TryP.getUnitInc() < 0)
2491     std::swap(TryRank, CandRank);
2492   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2493 }
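// For example, with two hypothetical candidates (set, unit increment):
//
//   TryP = {GPR, +1}, CandP = {GPR, +2}  // same set: the smaller increase
//                                        // wins and TryCand takes Reason
//   TryP = {GPR, -1}, CandP = {FPR, +1}  // a decrease beats an increase,
//                                        // whatever the sets involved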
2494 
2495 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2496   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2497 }
2498 
2499 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2500 /// their physreg def/use.
2501 ///
2502 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2503 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2504 /// with the operation that produces or consumes the physreg. We'll do this when
2505 /// regalloc has support for parallel copies.
2506 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2507   const MachineInstr *MI = SU->getInstr();
2508   if (!MI->isCopy())
2509     return 0;
2510 
2511   unsigned ScheduledOper = isTop ? 1 : 0;
2512   unsigned UnscheduledOper = isTop ? 0 : 1;
2513   // If we have already scheduled the physreg produce/consumer, immediately
2514   // schedule the copy.
2515   if (TargetRegisterInfo::isPhysicalRegister(
2516         MI->getOperand(ScheduledOper).getReg()))
2517     return 1;
2518   // If the physreg is at the boundary, defer it. Otherwise schedule it
2519   // immediately to free the dependent. We can hoist the copy later.
2520   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2521   if (TargetRegisterInfo::isPhysicalRegister(
2522         MI->getOperand(UnscheduledOper).getReg()))
2523     return AtBoundary ? -1 : 1;
2524   return 0;
2525 }
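// For example, when scheduling bottom-up (isTop == false), operand 0 is the
// already-scheduled side and operand 1 the unscheduled side. In illustrative
// pseudo-MIR:
//
//   %EAX = COPY %vreg0   // operand 0 is physical: its consumer below is
//                        // already placed, so return 1 (schedule now)
//   %vreg1 = COPY %EDI   // operand 1 is physical: with no predecessors left
//                        // the copy sits at the region boundary, so return
//                        // -1 and defer it to stay next to the physreg def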
2526 
2527 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2528 /// hierarchical. This may be more efficient than a graduated cost model because
2529 /// we don't need to evaluate all aspects of the model for each node in the
2530 /// queue. But it's really done to make the heuristics easier to debug and
2531 /// statistically analyze.
2532 ///
2533 /// \param Cand provides the policy and current best candidate.
2534 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2535 /// \param Zone describes the scheduled zone that we are extending.
2536 /// \param RPTracker describes reg pressure within the scheduled zone.
2537 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2538 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2539                                     SchedCandidate &TryCand,
2540                                     SchedBoundary &Zone,
2541                                     const RegPressureTracker &RPTracker,
2542                                     RegPressureTracker &TempTracker) {
2543 
2544   if (DAG->isTrackingPressure()) {
2545     // Always initialize TryCand's RPDelta.
2546     if (Zone.isTop()) {
2547       TempTracker.getMaxDownwardPressureDelta(
2548         TryCand.SU->getInstr(),
2549         TryCand.RPDelta,
2550         DAG->getRegionCriticalPSets(),
2551         DAG->getRegPressure().MaxSetPressure);
2552     }
2553     else {
2554       if (VerifyScheduling) {
2555         TempTracker.getMaxUpwardPressureDelta(
2556           TryCand.SU->getInstr(),
2557           &DAG->getPressureDiff(TryCand.SU),
2558           TryCand.RPDelta,
2559           DAG->getRegionCriticalPSets(),
2560           DAG->getRegPressure().MaxSetPressure);
2561       }
2562       else {
2563         RPTracker.getUpwardPressureDelta(
2564           TryCand.SU->getInstr(),
2565           DAG->getPressureDiff(TryCand.SU),
2566           TryCand.RPDelta,
2567           DAG->getRegionCriticalPSets(),
2568           DAG->getRegPressure().MaxSetPressure);
2569       }
2570     }
2571   }
2572   DEBUG(if (TryCand.RPDelta.Excess.isValid())
2573           dbgs() << "  SU(" << TryCand.SU->NodeNum << ") "
2574                  << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
2575                  << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
2576 
2577   // Initialize the candidate if needed.
2578   if (!Cand.isValid()) {
2579     TryCand.Reason = NodeOrder;
2580     return;
2581   }
2582 
  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit. If signed PSetID is negative, it is
  // invalid; convert it to INT_MAX to give it lowest priority.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical))
    return;

  // For loops that are acyclic path limited, aggressively schedule for latency.
  // This can result in very long dependence chains scheduled in sequence, so
  // once every cycle (when CurrMOps == 0), switch to normal heuristics.
  if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
      && tryLatency(TryCand, Cand, Zone))
    return;

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
              Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *NextClusterSU =
    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  // Weak edges are for clustering and other constraints.
  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
              getWeakLeft(Cand.SU, Zone.isTop()),
              TryCand, Cand, Weak)) {
    return;
  }
  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax))
    return;

  // Avoid critical resource consumption and balance the schedule.
  TryCand.initResourceDelta(DAG, SchedModel);
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  // For acyclic path limited loops, latency was already checked above.
  if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
      && tryLatency(TryCand, Cand, Zone)) {
    return;
  }

  // Prefer immediate defs/users of the last scheduled instruction. This is a
  // local pressure avoidance strategy that also makes the machine code
  // readable.
  if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
                 TryCand, Cand, NextDefUse))
    return;

  // Fall through to original instruction order.
  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
    TryCand.Reason = NodeOrder;
  }
}

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  ReadyQueue &Q = Zone.Available;

  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    DEBUG(dbgs() << "Pick Bot NOCAND\n");
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    DEBUG(dbgs() << "Pick Top NOCAND\n");
    return SU;
  }
  CandPolicy NoPolicy;
  SchedCandidate BotCand(NoPolicy);
  SchedCandidate TopCand(NoPolicy);
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);

  // Prefer bottom scheduling when heuristics are silent.
  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
  assert(BotCand.Reason != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
      || (BotCand.Reason == RegCritical
          && !BotCand.isRepeat(RegCritical)))
  {
    IsTopNode = false;
    tracePick(BotCand, IsTopNode);
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
  assert(TopCand.Reason != NoCand && "failed to find the first candidate");

  // Choose the queue with the most important (lowest enum) reason.
  if (TopCand.Reason < BotCand.Reason) {
    IsTopNode = true;
    tracePick(TopCand, IsTopNode);
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate, in node order if all else failed.
  IsTopNode = false;
  tracePick(BotCand, IsTopNode);
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
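  // Re-try the pick until it yields a node that has not already been
  // scheduled.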
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate TopCand(NoPolicy);
        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand, true);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    }
    else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        SchedCandidate BotCand(NoPolicy);
        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand, false);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    }
    else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {

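  // Copies land immediately above SU when scheduling top-down and immediately
  // below it when scheduling bottom-up.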
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  }
  else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
  if (EnableLoadCluster && DAG->TII->enableClusterLoads())
    DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
  if (EnableMacroFusion)
    DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

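  // PostRA scheduling is top-down only, so only the Top zone is initialized.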
  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  if (!Top.HazardRec) {
    Top.HazardRec =
      TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  }
}


void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {

  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}

void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;

  DEBUG(Q.dump());

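  // Scan the entire ready queue, keeping the best candidate seen so far.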
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = *I;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand, true);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
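    // SchedDFSResult requires bottom-up scheduling, so this strategy always
    // reports a bottom node.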
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
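    // Completing a subtree flips its ScheduledTrees bit, which changes node
    // priorities, so rebuild the heap.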
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
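    // Flip the direction on every pick to shuffle the order as much as
    // possible.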
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

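  // Hide very dense nodes to keep the rendered graph readable.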
  static bool isNodeHidden(const SUnit *Node) {
    return (Node->Preds.size() > 10 || Node->Succs.size() > 10);
  }

  static bool hasNodeAddressLabel(const SUnit *Node,
                                  const ScheduleDAG *Graph) {
    return false;
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }
  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
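    // Color each node by its DFS subtree so subtrees are visually distinct.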
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif  // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}