//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is normally enabled by the target via
// TargetSubtargetInfo::enablePostRAScheduler(). This flag overrides the
// target's choice.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
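// For example, the two flags above can be combined on an llc command line
// (an illustrative invocation; the input file name is arbitrary):
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.ll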

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control: divisor selecting which MBBs to schedule"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control: remainder selecting which MBBs to schedule"),
         cl::init(0), cl::Hidden);
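
// For example, "-postra-sched-debugdiv=4 -postra-sched-debugmod=1" restricts
// scheduling to every block whose running counter is congruent to 1 mod 4
// (see the bbcnt check in runOnMachineFunction below).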

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - Set of registers that are currently live, one bit per
    /// register number.
    BitVector LiveRegs;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB);

    /// Initialize the scheduler state for the next scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned endcount);

    /// Notify that the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule();

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;
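// The pass is referenced by this ID rather than by pointer; targets normally
// pick it up through the common codegen pass setup (e.g. TargetPassConfig
// scheduling PostRASchedulerID) rather than constructing it directly.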

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned endcount) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }
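  // A target opts in by overriding TargetSubtargetInfo::enablePostRAScheduler.
  // As a rough, hypothetical sketch (names match the call above, not any
  // particular in-tree target):
  //
  //   bool MySubtarget::enablePostRAScheduler(
  //       CodeGenOpt::Level OptLevel, AntiDepBreakMode &Mode,
  //       RegClassVector &CriticalPathRCs) const {
  //     Mode = ANTIDEP_CRITICAL;
  //     CriticalPathRCs.clear();
  //     return OptLevel >= CodeGenOpt::Default;
  //   }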

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       SE = BB->succ_end(); SI != SE; ++SI) {
    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
         E = (*SI)->livein_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

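  // No subregister was live either, so the super-register really is dead
  // here; restore the kill flag that was cleared above.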
  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed here unless it
        // is still live after this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      LiveRegs.set(Reg);

      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

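  // Weak edges do not gate readiness; they are counted separately from
  // NumPredsLeft, so just note the release and return.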
  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
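    // A pending node becomes available once the current cycle has reached its
    // depth; remove it with a swap-with-back so the scan stays linear.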
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem;
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0); // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = prior(RegionEnd);
  }

  // Reinsert any remaining debug_values.
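  // Walk DbgValues back to front, splicing each DBG_VALUE back in
  // immediately after the instruction it was recorded against (P.second).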
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}