//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
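// A minimal sketch of that loop (illustrative pseudocode only; the real
// implementation below adds hazard recognition, cycle bookkeeping, and
// backtracking for physical register interferences; the names here are not
// the actual API):
//
//   while (!Available.empty()) {
//     SUnit *SU = Available.pop();      // highest priority node first
//     if (!isLegal(SU)) { defer(SU); continue; }
//     emit(SU);                         // schedule, then release dependents
//   }
//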
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <climits>
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that
  /// modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  // Collect interferences between physical register use/defs.
  // Each interference is an SUnit and set of physical registers.
  SmallVector<SUnit*, 4> Interferences;
  typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
  LRegsMapT LRegsMap;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, nullptr) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(
          tm.getSubtargetImpl(), this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule() override;

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);

  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const override {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  MVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();

    // Special handling for CopyFromReg of untyped values.
    if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Opcode = Node->getMachineOpcode();
    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, nullptr);
  LiveRegGens.resize(TRI->getNumRegs() + 1, nullptr);
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
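///
/// For example (illustrative only), walking up the chain from the outer
/// CALLSEQ_END, each CALLSEQ_END raises NestLevel and each CALLSEQ_BEGIN
/// lowers it; the match is the node that brings NestLevel back to zero:
///
///   CALLSEQ_BEGIN     <- NestLevel 1 -> 0: match, return this node
///     CALLSEQ_BEGIN   <- NestLevel 2 -> 1
///     CALLSEQ_END     <- NestLevel 1 -> 2
///   CALLSEQ_END       <- walk starts here: NestLevel 0 -> 1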
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = nullptr;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return nullptr;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return nullptr;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == nullptr ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to an earlier cycle by
/// unscheduling nodes back to and including BtSU, in order to schedule a
/// specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  if (SU->getNode()->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    // Unfolding an x86 DEC64m operation results in a store, dec, load
    // sequence, which can't be handled here, so bail out.
    if (NewNodes.size() == 3)
      return nullptr;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(nullptr);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(nullptr);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
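///
/// For example (illustrative values): for a node with one explicit def and
/// implicit defs [EFLAGS, EAX], asking for EAX skips the explicit def
/// (NumRes = 1) and EFLAGS (NumRes = 2), so the type of the node's value
/// number 2 is returned.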
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Update the live register vector if the specified
/// register def of the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVectorImpl<unsigned> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
          dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return nullptr;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
    if (Reg) {
      SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
      if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
        continue;
    }
    SU->isPending = false;
    // The interfering node may no longer be available due to backtracking.
    // Furthermore, it may have been made available again, in which case it is
    // now already in the AvailableQueue.
    if (SU->isAvailable && !SU->NodeQueueId) {
      DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
      AvailableQueue->push(SU);
    }
    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
  }
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
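///
/// A condensed sketch of the strategy (illustrative only; the names below do
/// not match the actual code):
///
///   while ((SU = pop()) && interferes(SU))
///     setAside(SU);                    // remember it, try the next node
///   if (!SU)                           // every candidate interferes:
///     SU = backtrack() or cloneTheDef() or insertCrossClassCopies();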
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    DEBUG(dbgs() << "    Interfering reg " <<
          (LRegs[0] == TRI->getNumRegs() ? "CallResource"
           : TRI->getName(LRegs[0]))
           << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      Interferences.push_back(CurSU);
    }
    else {
      assert(CurSU->isPending && "Interferences are pending");
      // Update the interference with current live regs.
      LRegsPair.first->second = LRegs;
    }
    CurSU = AvailableQueue->pop();
  }
  if (CurSU)
    return CurSU;

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = nullptr;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      // BacktrackBottomUp mutates Interferences!
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
            << TrySU->NodeNum << ")\n");
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available.
      if (!TrySU->isAvailable)
        CurSU = AvailableQueue->pop();
      else {
        AvailableQueue->remove(TrySU);
        CurSU = TrySU;
      }
      // Interferences has been mutated. We must break.
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try
    // duplicating the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = nullptr;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }
  assert(CurSU && "Unable to resolve live physical register dependencies!");
  return CurSU;
}

1481 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1482 /// schedulers.
ListScheduleBottomUp()1483 void ScheduleDAGRRList::ListScheduleBottomUp() {
1484 // Release any predecessors of the special Exit node.
1485 ReleasePredecessors(&ExitSU);
1486
1487 // Add root to Available queue.
1488 if (!SUnits.empty()) {
1489 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1490 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1491 RootSU->isAvailable = true;
1492 AvailableQueue->push(RootSU);
1493 }
1494
1495 // While Available queue is not empty, grab the node with the highest
1496 // priority. If it is not ready put it back. Schedule the node.
1497 Sequence.reserve(SUnits.size());
1498 while (!AvailableQueue->empty() || !Interferences.empty()) {
1499 DEBUG(dbgs() << "\nExamining Available:\n";
1500 AvailableQueue->dump(this));
1501
1502 // Pick the best node to schedule taking all constraints into
1503 // consideration.
1504 SUnit *SU = PickNodeToScheduleBottomUp();
1505
1506 AdvancePastStalls(SU);
1507
1508 ScheduleNodeBottomUp(SU);
1509
1510 while (AvailableQueue->empty() && !PendingQueue.empty()) {
1511 // Advance the cycle to free resources. Skip ahead to the next ready SU.
1512 assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1513 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1514 }
1515 }
1516
1517 // Reverse the order, since this bottom-up scheduler built the sequence in reverse.
1518 std::reverse(Sequence.begin(), Sequence.end());
1519
1520 #ifndef NDEBUG
1521 VerifyScheduledSequence(/*isBottomUp=*/true);
1522 #endif
1523 }
1524
1525 //===----------------------------------------------------------------------===//
1526 // RegReductionPriorityQueue Definition
1527 //===----------------------------------------------------------------------===//
1528 //
1529 // This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
1530 // to reduce register pressure.
1531 //
1532 namespace {
1533 class RegReductionPQBase;
1534
1535 struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1536 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1537 };
1538
1539 #ifndef NDEBUG
1540 template<class SF>
1541 struct reverse_sort : public queue_sort {
1542 SF &SortFunc;
1543 reverse_sort(SF &sf) : SortFunc(sf) {}
1544
1545 bool operator()(SUnit* left, SUnit* right) const {
1546 // reverse left/right rather than simply !SortFunc(left, right)
1547 // to expose different paths in the comparison logic.
1548 return SortFunc(right, left);
1549 }
1550 };
1551 #endif // NDEBUG
1552
1553 /// bu_ls_rr_sort - Priority function for the bottom-up register pressure
1554 /// reduction scheduler.
1555 struct bu_ls_rr_sort : public queue_sort {
1556 enum {
1557 IsBottomUp = true,
1558 HasReadyFilter = false
1559 };
1560
1561 RegReductionPQBase *SPQ;
1562 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1563
1564 bool operator()(SUnit* left, SUnit* right) const;
1565 };
1566
1567 // src_ls_rr_sort - Priority function for source order scheduler.
1568 struct src_ls_rr_sort : public queue_sort {
1569 enum {
1570 IsBottomUp = true,
1571 HasReadyFilter = false
1572 };
1573
1574 RegReductionPQBase *SPQ;
1575 src_ls_rr_sort(RegReductionPQBase *spq)
1576 : SPQ(spq) {}
1577
1578 bool operator()(SUnit* left, SUnit* right) const;
1579 };
1580
1581 // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1582 struct hybrid_ls_rr_sort : public queue_sort {
1583 enum {
1584 IsBottomUp = true,
1585 HasReadyFilter = false
1586 };
1587
1588 RegReductionPQBase *SPQ;
1589 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1590 : SPQ(spq) {}
1591
1592 bool isReady(SUnit *SU, unsigned CurCycle) const;
1593
1594 bool operator()(SUnit* left, SUnit* right) const;
1595 };
1596
1597 // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1598 // scheduler.
1599 struct ilp_ls_rr_sort : public queue_sort {
1600 enum {
1601 IsBottomUp = true,
1602 HasReadyFilter = false
1603 };
1604
1605 RegReductionPQBase *SPQ;
1606 ilp_ls_rr_sort(RegReductionPQBase *spq)
1607 : SPQ(spq) {}
1608
1609 bool isReady(SUnit *SU, unsigned CurCycle) const;
1610
1611 bool operator()(SUnit* left, SUnit* right) const;
1612 };
1613
1614 class RegReductionPQBase : public SchedulingPriorityQueue {
1615 protected:
1616 std::vector<SUnit*> Queue;
1617 unsigned CurQueueId;
1618 bool TracksRegPressure;
1619 bool SrcOrder;
1620
1621 // SUnits - The SUnits for the current graph.
1622 std::vector<SUnit> *SUnits;
1623
1624 MachineFunction &MF;
1625 const TargetInstrInfo *TII;
1626 const TargetRegisterInfo *TRI;
1627 const TargetLowering *TLI;
1628 ScheduleDAGRRList *scheduleDAG;
1629
1630 // SethiUllmanNumbers - The SethiUllman number for each node.
1631 std::vector<unsigned> SethiUllmanNumbers;
1632
1633 /// RegPressure - Tracking current reg pressure per register class.
1634 ///
1635 std::vector<unsigned> RegPressure;
1636
1637 /// RegLimit - Tracking the number of allocatable registers per register
1638 /// class.
1639 std::vector<unsigned> RegLimit;
1640
1641 public:
1642 RegReductionPQBase(MachineFunction &mf,
1643 bool hasReadyFilter,
1644 bool tracksrp,
1645 bool srcorder,
1646 const TargetInstrInfo *tii,
1647 const TargetRegisterInfo *tri,
1648 const TargetLowering *tli)
1649 : SchedulingPriorityQueue(hasReadyFilter),
1650 CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
1651 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
1652 if (TracksRegPressure) {
1653 unsigned NumRC = TRI->getNumRegClasses();
1654 RegLimit.resize(NumRC);
1655 RegPressure.resize(NumRC);
1656 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1657 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1658 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1659 E = TRI->regclass_end(); I != E; ++I)
1660 RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1661 }
1662 }
1663
1664 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1665 scheduleDAG = scheduleDag;
1666 }
1667
1668 ScheduleHazardRecognizer* getHazardRec() {
1669 return scheduleDAG->getHazardRec();
1670 }
1671
1672 void initNodes(std::vector<SUnit> &sunits) override;
1673
1674 void addNode(const SUnit *SU) override;
1675
1676 void updateNode(const SUnit *SU) override;
1677
1678 void releaseState() override {
1679 SUnits = nullptr;
1680 SethiUllmanNumbers.clear();
1681 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1682 }
1683
1684 unsigned getNodePriority(const SUnit *SU) const;
1685
1686 unsigned getNodeOrdering(const SUnit *SU) const {
1687 if (!SU->getNode()) return 0;
1688
1689 return SU->getNode()->getIROrder();
1690 }
1691
1692 bool empty() const override { return Queue.empty(); }
1693
1694 void push(SUnit *U) override {
1695 assert(!U->NodeQueueId && "Node in the queue already");
1696 U->NodeQueueId = ++CurQueueId;
1697 Queue.push_back(U);
1698 }
1699
1700 void remove(SUnit *SU) override {
1701 assert(!Queue.empty() && "Queue is empty!");
1702 assert(SU->NodeQueueId != 0 && "Not in queue!");
1703 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1704 SU);
1705 if (I != std::prev(Queue.end()))
1706 std::swap(*I, Queue.back());
1707 Queue.pop_back();
1708 SU->NodeQueueId = 0;
1709 }
1710
1711 bool tracksRegPressure() const override { return TracksRegPressure; }
1712
1713 void dumpRegPressure() const;
1714
1715 bool HighRegPressure(const SUnit *SU) const;
1716
1717 bool MayReduceRegPressure(SUnit *SU) const;
1718
1719 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1720
1721 void scheduledNode(SUnit *SU) override;
1722
1723 void unscheduledNode(SUnit *SU) override;
1724
1725 protected:
1726 bool canClobber(const SUnit *SU, const SUnit *Op);
1727 void AddPseudoTwoAddrDeps();
1728 void PrescheduleNodesWithMultipleUses();
1729 void CalculateSethiUllmanNumbers();
1730 };
1731
1732 template<class SF>
1733 static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1734 std::vector<SUnit *>::iterator Best = Q.begin();
1735 for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
1736 E = Q.end(); I != E; ++I)
1737 if (Picker(*Best, *I))
1738 Best = I;
1739 SUnit *V = *Best;
1740 if (Best != std::prev(Q.end()))
1741 std::swap(*Best, Q.back());
1742 Q.pop_back();
1743 return V;
1744 }
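
// Design note with a small illustration: popFromQueueImpl selects the best
// node by a linear scan and then removes it with the swap-with-back idiom,
// which is O(1) because Queue maintains no ordering invariant between pops.
// Assuming, purely for illustration, a picker that prefers the higher
// NodeNum:
//
//   Queue = [SU3, SU7, SU5]   // the scan leaves Best pointing at SU7
//   swap(*Best, Queue.back()) // Queue = [SU3, SU5, SU7]
//   Queue.pop_back()          // Queue = [SU3, SU5]; SU7 is returned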
1745
1746 template<class SF>
1747 SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1748 #ifndef NDEBUG
1749 if (DAG->StressSched) {
1750 reverse_sort<SF> RPicker(Picker);
1751 return popFromQueueImpl(Q, RPicker);
1752 }
1753 #endif
1754 (void)DAG;
1755 return popFromQueueImpl(Q, Picker);
1756 }
1757
1758 template<class SF>
1759 class RegReductionPriorityQueue : public RegReductionPQBase {
1760 SF Picker;
1761
1762 public:
1763 RegReductionPriorityQueue(MachineFunction &mf,
1764 bool tracksrp,
1765 bool srcorder,
1766 const TargetInstrInfo *tii,
1767 const TargetRegisterInfo *tri,
1768 const TargetLowering *tli)
1769 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
1770 tii, tri, tli),
1771 Picker(this) {}
1772
1773 bool isBottomUp() const override { return SF::IsBottomUp; }
1774
1775 bool isReady(SUnit *U) const override {
1776 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1777 }
1778
1779 SUnit *pop() override {
1780 if (Queue.empty()) return nullptr;
1781
1782 SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1783 V->NodeQueueId = 0;
1784 return V;
1785 }
1786
1787 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1788 void dump(ScheduleDAG *DAG) const override {
1789 // Emulate pop() without clobbering NodeQueueIds.
1790 std::vector<SUnit*> DumpQueue = Queue;
1791 SF DumpPicker = Picker;
1792 while (!DumpQueue.empty()) {
1793 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1794 dbgs() << "Height " << SU->getHeight() << ": ";
1795 SU->dump(DAG);
1796 }
1797 }
1798 #endif
1799 };
1800
1801 typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1802 BURegReductionPriorityQueue;
1803
1804 typedef RegReductionPriorityQueue<src_ls_rr_sort>
1805 SrcRegReductionPriorityQueue;
1806
1807 typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1808 HybridBURRPriorityQueue;
1809
1810 typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1811 ILPBURRPriorityQueue;
1812 } // end anonymous namespace
1813
1814 //===----------------------------------------------------------------------===//
1815 // Static Node Priority for Register Pressure Reduction
1816 //===----------------------------------------------------------------------===//
1817
1818 // Check for special nodes that bypass scheduling heuristics.
1819 // Currently this pushes TokenFactor nodes down, but may be used for other
1820 // pseudo-ops as well.
1821 //
1822 // Return -1 to schedule right above left, 1 for left above right.
1823 // Return 0 if no bias exists.
1824 static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1825 bool LSchedLow = left->isScheduleLow;
1826 bool RSchedLow = right->isScheduleLow;
1827 if (LSchedLow != RSchedLow)
1828 return LSchedLow < RSchedLow ? 1 : -1;
1829 return 0;
1830 }
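
// For example, if only left->isScheduleLow is set, LSchedLow < RSchedLow is
// false and -1 is returned, i.e. right is scheduled above left and the
// schedule-low node sinks toward the bottom of the schedule.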
1831
1832 /// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
1833 /// Smaller number is the higher priority.
1834 static unsigned
1835 CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1836 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1837 if (SethiUllmanNumber != 0)
1838 return SethiUllmanNumber;
1839
1840 unsigned Extra = 0;
1841 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1842 I != E; ++I) {
1843 if (I->isCtrl()) continue; // ignore chain preds
1844 SUnit *PredSU = I->getSUnit();
1845 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1846 if (PredSethiUllman > SethiUllmanNumber) {
1847 SethiUllmanNumber = PredSethiUllman;
1848 Extra = 0;
1849 } else if (PredSethiUllman == SethiUllmanNumber)
1850 ++Extra;
1851 }
1852
1853 SethiUllmanNumber += Extra;
1854
1855 if (SethiUllmanNumber == 0)
1856 SethiUllmanNumber = 1;
1857
1858 return SethiUllmanNumber;
1859 }
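
// A worked example of the labeling above (illustrative only). For a node N
// with two non-chain predecessors A and B:
//
//   SU(A) = 2, SU(B) = 1  =>  SU(N) = 2   (inherit the max; the smaller
//                                          subtree adds no extra pressure)
//   SU(A) = 2, SU(B) = 2  =>  SU(N) = 3   (equal subtrees tie, so Extra
//                                          bumps the number by one)
//
// A node with no non-chain predecessors gets the minimum number, 1.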
1860
1861 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1862 /// scheduling units.
1863 void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1864 SethiUllmanNumbers.assign(SUnits->size(), 0);
1865
1866 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1867 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1868 }
1869
1870 void RegReductionPQBase::addNode(const SUnit *SU) {
1871 unsigned SUSize = SethiUllmanNumbers.size();
1872 if (SUnits->size() > SUSize)
1873 SethiUllmanNumbers.resize(SUSize*2, 0);
1874 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1875 }
1876
1877 void RegReductionPQBase::updateNode(const SUnit *SU) {
1878 SethiUllmanNumbers[SU->NodeNum] = 0;
1879 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1880 }
1881
1882 // Lower priority means schedule further down. For bottom-up scheduling, lower
1883 // priority SUs are scheduled before higher priority SUs.
1884 unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1885 assert(SU->NodeNum < SethiUllmanNumbers.size());
1886 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1887 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1888 // CopyToReg should be close to its uses to facilitate coalescing and
1889 // avoid spilling.
1890 return 0;
1891 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1892 Opc == TargetOpcode::SUBREG_TO_REG ||
1893 Opc == TargetOpcode::INSERT_SUBREG)
1894 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1895 // close to their uses to facilitate coalescing.
1896 return 0;
1897 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1898 // If SU does not have a register use, i.e. it doesn't produce a value
1899 // that would be consumed (e.g. store), then it terminates a chain of
1900 // computation. Give it a large SethiUllman number so it will be
1901 // scheduled right before its predecessors, so that it doesn't lengthen
1902 // their live ranges.
1903 return 0xffff;
1904 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1905 // If SU does not have a register def, schedule it close to its uses
1906 // because it does not lengthen any live ranges.
1907 return 0;
1908 #if 1
1909 return SethiUllmanNumbers[SU->NodeNum];
1910 #else
1911 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1912 if (SU->isCallOp) {
1913 // FIXME: This assumes all of the defs are used as call operands.
1914 int NP = (int)Priority - SU->getNode()->getNumValues();
1915 return (NP > 0) ? NP : 0;
1916 }
1917 return Priority;
1918 #endif
1919 }
1920
1921 //===----------------------------------------------------------------------===//
1922 // Register Pressure Tracking
1923 //===----------------------------------------------------------------------===//
1924
1925 void RegReductionPQBase::dumpRegPressure() const {
1926 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1927 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1928 E = TRI->regclass_end(); I != E; ++I) {
1929 const TargetRegisterClass *RC = *I;
1930 unsigned Id = RC->getID();
1931 unsigned RP = RegPressure[Id];
1932 if (!RP) continue;
1933 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1934 << '\n');
1935 }
1936 #endif
1937 }
1938
1939 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1940 if (!TLI)
1941 return false;
1942
1943 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1944 I != E; ++I) {
1945 if (I->isCtrl())
1946 continue;
1947 SUnit *PredSU = I->getSUnit();
1948 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1949 // to cover the number of registers defined (they are all live).
1950 if (PredSU->NumRegDefsLeft == 0) {
1951 continue;
1952 }
1953 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1954 RegDefPos.IsValid(); RegDefPos.Advance()) {
1955 unsigned RCId, Cost;
1956 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
1957
1958 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1959 return true;
1960 }
1961 }
1962 return false;
1963 }
1964
1965 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1966 const SDNode *N = SU->getNode();
1967
1968 if (!N->isMachineOpcode() || !SU->NumSuccs)
1969 return false;
1970
1971 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1972 for (unsigned i = 0; i != NumDefs; ++i) {
1973 MVT VT = N->getSimpleValueType(i);
1974 if (!N->hasAnyUseOfValue(i))
1975 continue;
1976 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1977 if (RegPressure[RCId] >= RegLimit[RCId])
1978 return true;
1979 }
1980 return false;
1981 }
1982
1983 // Compute the register pressure contribution of this instruction by counting
1984 // up for uses that are not live and down for defs. Only count register classes
1985 // that are already under high pressure. As a side effect, compute the number of
1986 // uses of registers that are already live.
1987 //
1988 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1989 // so could probably be factored.
1990 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1991 LiveUses = 0;
1992 int PDiff = 0;
1993 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1994 I != E; ++I) {
1995 if (I->isCtrl())
1996 continue;
1997 SUnit *PredSU = I->getSUnit();
1998 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1999 // to cover the number of registers defined (they are all live).
2000 if (PredSU->NumRegDefsLeft == 0) {
2001 if (PredSU->getNode()->isMachineOpcode())
2002 ++LiveUses;
2003 continue;
2004 }
2005 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2006 RegDefPos.IsValid(); RegDefPos.Advance()) {
2007 MVT VT = RegDefPos.GetValue();
2008 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2009 if (RegPressure[RCId] >= RegLimit[RCId])
2010 ++PDiff;
2011 }
2012 }
2013 const SDNode *N = SU->getNode();
2014
2015 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
2016 return PDiff;
2017
2018 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2019 for (unsigned i = 0; i != NumDefs; ++i) {
2020 MVT VT = N->getSimpleValueType(i);
2021 if (!N->hasAnyUseOfValue(i))
2022 continue;
2023 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2024 if (RegPressure[RCId] >= RegLimit[RCId])
2025 --PDiff;
2026 }
2027 return PDiff;
2028 }
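
// Arithmetic sketch of the computation above: suppose SU has two operands
// whose defining nodes each have one outstanding register def in a class
// already at its limit, and SU itself defines one used value in a saturated
// class. Then PDiff = 2 (uses counted up) - 1 (def counted down) = 1, and
// LiveUses counts operands whose defining nodes are already fully live.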
2029
2030 void RegReductionPQBase::scheduledNode(SUnit *SU) {
2031 if (!TracksRegPressure)
2032 return;
2033
2034 if (!SU->getNode())
2035 return;
2036
2037 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2038 I != E; ++I) {
2039 if (I->isCtrl())
2040 continue;
2041 SUnit *PredSU = I->getSUnit();
2042 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2043 // to cover the number of registers defined (they are all live).
2044 if (PredSU->NumRegDefsLeft == 0) {
2045 continue;
2046 }
2047 // FIXME: The ScheduleDAG currently loses information about which of a
2048 // node's values is consumed by each dependence. Consequently, if the node
2049 // defines multiple register classes, we don't know which to pressurize
2050 // here. Instead the following loop consumes the register defs in an
2051 // arbitrary order. At least it handles the common case of clustered loads
2052 // to the same class. For precise liveness, each SDep needs to indicate the
2053 // result number. But that tightly couples the ScheduleDAG with the
2054 // SelectionDAG making updates tricky. A simpler hack would be to attach a
2055 // value type or register class to SDep.
2056 //
2057 // The most important aspect of register tracking is balancing the increase
2058 // here with the reduction further below. Note that this SU may use multiple
2059 // defs in PredSU. They can't be determined here, but we've already
2060 // compensated by reducing NumRegDefsLeft in PredSU during
2061 // ScheduleDAGSDNodes::AddSchedEdges.
2062 --PredSU->NumRegDefsLeft;
2063 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2064 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2065 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2066 if (SkipRegDefs)
2067 continue;
2068
2069 unsigned RCId, Cost;
2070 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2071 RegPressure[RCId] += Cost;
2072 break;
2073 }
2074 }
2075
2076 // We should have this assert, but there may be dead SDNodes that never
2077 // materialize as SUnits, so they don't appear to generate liveness.
2078 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2079 int SkipRegDefs = (int)SU->NumRegDefsLeft;
2080 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2081 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2082 if (SkipRegDefs > 0)
2083 continue;
2084 unsigned RCId, Cost;
2085 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2086 if (RegPressure[RCId] < Cost) {
2087 // Register pressure tracking is imprecise. This can happen. But we try
2088 // hard not to let it happen because it likely results in poor scheduling.
2089 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
2090 RegPressure[RCId] = 0;
2091 }
2092 else {
2093 RegPressure[RCId] -= Cost;
2094 }
2095 }
2096 dumpRegPressure();
2097 }
2098
2099 void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2100 if (!TracksRegPressure)
2101 return;
2102
2103 const SDNode *N = SU->getNode();
2104 if (!N) return;
2105
2106 if (!N->isMachineOpcode()) {
2107 if (N->getOpcode() != ISD::CopyToReg)
2108 return;
2109 } else {
2110 unsigned Opc = N->getMachineOpcode();
2111 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2112 Opc == TargetOpcode::INSERT_SUBREG ||
2113 Opc == TargetOpcode::SUBREG_TO_REG ||
2114 Opc == TargetOpcode::REG_SEQUENCE ||
2115 Opc == TargetOpcode::IMPLICIT_DEF)
2116 return;
2117 }
2118
2119 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2120 I != E; ++I) {
2121 if (I->isCtrl())
2122 continue;
2123 SUnit *PredSU = I->getSUnit();
2124 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2125 // counts data deps.
2126 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2127 continue;
2128 const SDNode *PN = PredSU->getNode();
2129 if (!PN->isMachineOpcode()) {
2130 if (PN->getOpcode() == ISD::CopyFromReg) {
2131 MVT VT = PN->getSimpleValueType(0);
2132 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2133 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2134 }
2135 continue;
2136 }
2137 unsigned POpc = PN->getMachineOpcode();
2138 if (POpc == TargetOpcode::IMPLICIT_DEF)
2139 continue;
2140 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2141 POpc == TargetOpcode::INSERT_SUBREG ||
2142 POpc == TargetOpcode::SUBREG_TO_REG) {
2143 MVT VT = PN->getSimpleValueType(0);
2144 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2145 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2146 continue;
2147 }
2148 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2149 for (unsigned i = 0; i != NumDefs; ++i) {
2150 MVT VT = PN->getSimpleValueType(i);
2151 if (!PN->hasAnyUseOfValue(i))
2152 continue;
2153 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2154 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2155 // Register pressure tracking is imprecise. This can happen.
2156 RegPressure[RCId] = 0;
2157 else
2158 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2159 }
2160 }
2161
2162 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2163 // may transfer data dependencies to CopyToReg.
2164 if (SU->NumSuccs && N->isMachineOpcode()) {
2165 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2166 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2167 MVT VT = N->getSimpleValueType(i);
2168 if (VT == MVT::Glue || VT == MVT::Other)
2169 continue;
2170 if (!N->hasAnyUseOfValue(i))
2171 continue;
2172 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2173 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2174 }
2175 }
2176
2177 dumpRegPressure();
2178 }
2179
2180 //===----------------------------------------------------------------------===//
2181 // Dynamic Node Priority for Register Pressure Reduction
2182 //===----------------------------------------------------------------------===//
2183
2184 /// closestSucc - Returns the scheduled cycle of the successor which is
2185 /// closest to the current cycle.
2186 static unsigned closestSucc(const SUnit *SU) {
2187 unsigned MaxHeight = 0;
2188 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2189 I != E; ++I) {
2190 if (I->isCtrl()) continue; // ignore chain succs
2191 unsigned Height = I->getSUnit()->getHeight();
2192 // If there are a bunch of CopyToRegs stacked up, they should be considered
2193 // to be at the same position.
2194 if (I->getSUnit()->getNode() &&
2195 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2196 Height = closestSucc(I->getSUnit())+1;
2197 if (Height > MaxHeight)
2198 MaxHeight = Height;
2199 }
2200 return MaxHeight;
2201 }
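
// For example, if SU's only data successor is a CopyToReg at height 4 that
// in turn feeds a non-CopyToReg node at height 7, the CopyToReg is looked
// through and closestSucc(SU) returns 8 rather than 4.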
2202
2203 /// calcMaxScratches - Returns a cost estimate of the worst case requirement
2204 /// for scratch registers, i.e. number of data dependencies.
2205 static unsigned calcMaxScratches(const SUnit *SU) {
2206 unsigned Scratches = 0;
2207 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2208 I != E; ++I) {
2209 if (I->isCtrl()) continue; // ignore chain preds
2210 Scratches++;
2211 }
2212 return Scratches;
2213 }
2214
2215 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2216 /// CopyFromReg from a virtual register.
2217 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2218 bool RetVal = false;
2219 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2220 I != E; ++I) {
2221 if (I->isCtrl()) continue;
2222 const SUnit *PredSU = I->getSUnit();
2223 if (PredSU->getNode() &&
2224 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2225 unsigned Reg =
2226 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2227 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2228 RetVal = true;
2229 continue;
2230 }
2231 }
2232 return false;
2233 }
2234 return RetVal;
2235 }
2236
2237 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2238 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2239 /// it has no other use. It should be scheduled closer to the terminator.
2240 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2241 bool RetVal = false;
2242 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2243 I != E; ++I) {
2244 if (I->isCtrl()) continue;
2245 const SUnit *SuccSU = I->getSUnit();
2246 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2247 unsigned Reg =
2248 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2249 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2250 RetVal = true;
2251 continue;
2252 }
2253 }
2254 return false;
2255 }
2256 return RetVal;
2257 }
2258
2259 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2260 // set isVRegCycle for its CopyFromReg operands.
2261 //
2262 // This is only relevant for single-block loops, in which case the VRegCycle
2263 // node is likely an induction variable in which the operand and target virtual
2264 // registers should be coalesced (e.g. pre/post increment values). Setting the
2265 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2266 // CopyFromReg so that this node becomes the virtual register "kill". This
2267 // avoids interference between the values live in and out of the block and
2268 // eliminates a copy inside the loop.
2269 static void initVRegCycle(SUnit *SU) {
2270 if (DisableSchedVRegCycle)
2271 return;
2272
2273 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2274 return;
2275
2276 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2277
2278 SU->isVRegCycle = true;
2279
2280 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2281 I != E; ++I) {
2282 if (I->isCtrl()) continue;
2283 I->getSUnit()->isVRegCycle = true;
2284 }
2285 }
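
// An illustrative single-block loop shape (pseudo nodes, not taken from real
// IR):
//
//   %v   = CopyFromReg %vregX      // loop-carried value live into the block
//   %inc = add %v, 1               // only live-in operands ...
//   CopyToReg %vregX, %inc         // ... and only live-out uses
//
// Here the add is marked isVRegCycle, and so is its CopyFromReg operand,
// which helps prioritize other uses of %v so the add becomes the kill of %v
// and the two virtual registers can be coalesced.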
2286
2287 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2288 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2289 static void resetVRegCycle(SUnit *SU) {
2290 if (!SU->isVRegCycle)
2291 return;
2292
2293 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2294 I != E; ++I) {
2295 if (I->isCtrl()) continue; // ignore chain preds
2296 SUnit *PredSU = I->getSUnit();
2297 if (PredSU->isVRegCycle) {
2298 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2299 "VRegCycle def must be CopyFromReg");
2300 I->getSUnit()->isVRegCycle = 0;
2301 }
2302 }
2303 }
2304
2305 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2306 // means a node that defines the VRegCycle has not been scheduled yet.
2307 static bool hasVRegCycleUse(const SUnit *SU) {
2308 // If this SU also defines the VReg, don't hoist it as a "use".
2309 if (SU->isVRegCycle)
2310 return false;
2311
2312 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2313 I != E; ++I) {
2314 if (I->isCtrl()) continue; // ignore chain preds
2315 if (I->getSUnit()->isVRegCycle &&
2316 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2317 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2318 return true;
2319 }
2320 }
2321 return false;
2322 }
2323
2324 // Check for either a dependence (latency) or resource (hazard) stall.
2325 //
2326 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2327 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2328 if ((int)SPQ->getCurCycle() < Height) return true;
2329 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2330 != ScheduleHazardRecognizer::NoHazard)
2331 return true;
2332 return false;
2333 }
2334
2335 // Return -1 if left has higher priority, 1 if right has higher priority.
2336 // Return 0 if latency-based priority is equivalent.
2337 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2338 RegReductionPQBase *SPQ) {
2339 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2340 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2341 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2342 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2343 int LHeight = (int)left->getHeight() + LPenalty;
2344 int RHeight = (int)right->getHeight() + RPenalty;
2345
2346 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2347 BUHasStall(left, LHeight, SPQ);
2348 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2349 BUHasStall(right, RHeight, SPQ);
2350
2351 // If scheduling only one of the nodes will cause a pipeline stall, delay it.
2352 // If scheduling both of the nodes will cause pipeline stalls, sort them
2353 // according to their height.
2354 if (LStall) {
2355 if (!RStall)
2356 return 1;
2357 if (LHeight != RHeight)
2358 return LHeight > RHeight ? 1 : -1;
2359 } else if (RStall)
2360 return -1;
2361
2362 // If either node is scheduling for latency, sort them by height/depth
2363 // and latency.
2364 if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2365 right->SchedulingPref == Sched::ILP)) {
2366 // If neither instruction stalls (!LStall && !RStall) and the
2367 // HazardRecognizer is enabled, grouping instructions by cycle, then height
2368 // is already covered so only depth matters. We also reach this point if
2369 // both stall but have the same height.
2370 if (!SPQ->getHazardRec()->isEnabled()) {
2371 if (LHeight != RHeight)
2372 return LHeight > RHeight ? 1 : -1;
2373 }
2374 int LDepth = left->getDepth() - LPenalty;
2375 int RDepth = right->getDepth() - RPenalty;
2376 if (LDepth != RDepth) {
2377 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2378 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2379 << ") depth " << RDepth << "\n");
2380 return LDepth < RDepth ? 1 : -1;
2381 }
2382 if (left->Latency != right->Latency)
2383 return left->Latency > right->Latency ? 1 : -1;
2384 }
2385 return 0;
2386 }
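
// For example, if left would stall (its adjusted height exceeds the current
// cycle) while right would not, 1 is returned and callers treat right as the
// higher-priority node, delaying the stalled one.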
2387
2388 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2389 // Schedule physical register definitions close to their use. This is
2390 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2391 // long as shortening physreg live ranges is generally good, we can defer
2392 // creating a subtarget hook.
2393 if (!DisableSchedPhysRegJoin) {
2394 bool LHasPhysReg = left->hasPhysRegDefs;
2395 bool RHasPhysReg = right->hasPhysRegDefs;
2396 if (LHasPhysReg != RHasPhysReg) {
2397 #ifndef NDEBUG
2398 static const char *const PhysRegMsg[] = { " has no physreg",
2399 " defines a physreg" };
2400 #endif
2401 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2402 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2403 << PhysRegMsg[RHasPhysReg] << "\n");
2404 return LHasPhysReg < RHasPhysReg;
2405 }
2406 }
2407
2408 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2409 unsigned LPriority = SPQ->getNodePriority(left);
2410 unsigned RPriority = SPQ->getNodePriority(right);
2411
2412 // Be really careful about hoisting call operands above previous calls.
2413 // Only allow it if it would reduce register pressure.
2414 if (left->isCall && right->isCallOp) {
2415 unsigned RNumVals = right->getNode()->getNumValues();
2416 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2417 }
2418 if (right->isCall && left->isCallOp) {
2419 unsigned LNumVals = left->getNode()->getNumValues();
2420 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2421 }
2422
2423 if (LPriority != RPriority)
2424 return LPriority > RPriority;
2425
2426 // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2427 // the same, then keep source order.
2428 if (left->isCall || right->isCall) {
2429 unsigned LOrder = SPQ->getNodeOrdering(left);
2430 unsigned ROrder = SPQ->getNodeOrdering(right);
2431
2432 // Prefer an ordering where the lower the non-zero order number, the higher
2433 // the preference.
2434 if ((LOrder || ROrder) && LOrder != ROrder)
2435 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2436 }
2437
2438 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
2439 // e.g.
2440 // t1 = op t2, c1
2441 // t3 = op t4, c2
2442 //
2443 // and the following instructions are both ready.
2444 // t2 = op c3
2445 // t4 = op c4
2446 //
2447 // Then schedule t2 = op first.
2448 // i.e.
2449 // t4 = op c4
2450 // t2 = op c3
2451 // t1 = op t2, c1
2452 // t3 = op t4, c2
2453 //
2454 // This creates more short live intervals.
2455 unsigned LDist = closestSucc(left);
2456 unsigned RDist = closestSucc(right);
2457 if (LDist != RDist)
2458 return LDist < RDist;
2459
2460 // How many registers become live when the node is scheduled.
2461 unsigned LScratch = calcMaxScratches(left);
2462 unsigned RScratch = calcMaxScratches(right);
2463 if (LScratch != RScratch)
2464 return LScratch > RScratch;
2465
2466 // Comparing latency against a call makes little sense unless the node
2467 // is register pressure-neutral.
2468 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2469 return (left->NodeQueueId > right->NodeQueueId);
2470
2471 // Do not compare latencies when one or both of the nodes are calls.
2472 if (!DisableSchedCycles &&
2473 !(left->isCall || right->isCall)) {
2474 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2475 if (result != 0)
2476 return result > 0;
2477 }
2478 else {
2479 if (left->getHeight() != right->getHeight())
2480 return left->getHeight() > right->getHeight();
2481
2482 if (left->getDepth() != right->getDepth())
2483 return left->getDepth() < right->getDepth();
2484 }
2485
2486 assert(left->NodeQueueId && right->NodeQueueId &&
2487 "NodeQueueId cannot be zero");
2488 return (left->NodeQueueId > right->NodeQueueId);
2489 }
2490
2491 // Bottom up
2492 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2493 if (int res = checkSpecialNodes(left, right))
2494 return res > 0;
2495
2496 return BURRSort(left, right, SPQ);
2497 }
2498
2499 // Source order, otherwise bottom up.
2500 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2501 if (int res = checkSpecialNodes(left, right))
2502 return res > 0;
2503
2504 unsigned LOrder = SPQ->getNodeOrdering(left);
2505 unsigned ROrder = SPQ->getNodeOrdering(right);
2506
2507 // Prefer an ordering where the lower the non-zero order number, the higher
2508 // the preference.
2509 if ((LOrder || ROrder) && LOrder != ROrder)
2510 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2511
2512 return BURRSort(left, right, SPQ);
2513 }
2514
2515 // If the time between now and when the instruction will be ready can cover
2516 // the spill code, then avoid adding it to the ready queue. This gives long
2517 // stalls highest priority and allows hoisting across calls. It should also
2518 // speed up processing the available queue.
2519 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2520 static const unsigned ReadyDelay = 3;
2521
2522 if (SPQ->MayReduceRegPressure(SU)) return true;
2523
2524 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2525
2526 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2527 != ScheduleHazardRecognizer::NoHazard)
2528 return false;
2529
2530 return true;
2531 }
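
// For instance, with CurCycle == 5 a node of height 10 is withheld from the
// ready queue (10 > 5 + ReadyDelay): the stall before it can issue is
// presumed long enough to hide any spill code. A node that may reduce
// register pressure is exempt, since that check is performed first.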
2532
2533 // Return true if right should be scheduled with higher priority than left.
2534 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2535 if (int res = checkSpecialNodes(left, right))
2536 return res > 0;
2537
2538 if (left->isCall || right->isCall)
2539 // No way to compute latency of calls.
2540 return BURRSort(left, right, SPQ);
2541
2542 bool LHigh = SPQ->HighRegPressure(left);
2543 bool RHigh = SPQ->HighRegPressure(right);
2544 // Avoid causing spills. If register pressure is high, schedule for
2545 // register pressure reduction.
2546 if (LHigh && !RHigh) {
2547 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2548 << right->NodeNum << ")\n");
2549 return true;
2550 }
2551 else if (!LHigh && RHigh) {
2552 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2553 << left->NodeNum << ")\n");
2554 return false;
2555 }
2556 if (!LHigh && !RHigh) {
2557 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2558 if (result != 0)
2559 return result > 0;
2560 }
2561 return BURRSort(left, right, SPQ);
2562 }
2563
2564 // Schedule as many instructions in each cycle as possible, so don't make an
2565 // instruction available unless it is ready in the current cycle.
2566 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2567 if (SU->getHeight() > CurCycle) return false;
2568
2569 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2570 != ScheduleHazardRecognizer::NoHazard)
2571 return false;
2572
2573 return true;
2574 }
2575
2576 static bool canEnableCoalescing(SUnit *SU) {
2577 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2578 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2579 // CopyToReg should be close to its uses to facilitate coalescing and
2580 // avoid spilling.
2581 return true;
2582
2583 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2584 Opc == TargetOpcode::SUBREG_TO_REG ||
2585 Opc == TargetOpcode::INSERT_SUBREG)
2586 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2587 // close to their uses to facilitate coalescing.
2588 return true;
2589
2590 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2591 // If SU does not have a register def, schedule it close to its uses
2592 // because it does not lengthen any live ranges.
2593 return true;
2594
2595 return false;
2596 }
2597
2598 // list-ilp is currently an experimental scheduler that allows various
2599 // heuristics to be enabled prior to the normal register reduction logic.
2600 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2601 if (int res = checkSpecialNodes(left, right))
2602 return res > 0;
2603
2604 if (left->isCall || right->isCall)
2605 // No way to compute latency of calls.
2606 return BURRSort(left, right, SPQ);
2607
2608 unsigned LLiveUses = 0, RLiveUses = 0;
2609 int LPDiff = 0, RPDiff = 0;
2610 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2611 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2612 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2613 }
2614 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2615 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2616 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2617 return LPDiff > RPDiff;
2618 }
2619
2620 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2621 bool LReduce = canEnableCoalescing(left);
2622 bool RReduce = canEnableCoalescing(right);
2623 if (LReduce && !RReduce) return false;
2624 if (RReduce && !LReduce) return true;
2625 }
2626
2627 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2628 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2629 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2630 return LLiveUses < RLiveUses;
2631 }
2632
2633 if (!DisableSchedStalls) {
2634 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2635 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2636 if (LStall != RStall)
2637 return left->getHeight() > right->getHeight();
2638 }
2639
2640 if (!DisableSchedCriticalPath) {
2641 int spread = (int)left->getDepth() - (int)right->getDepth();
2642 if (std::abs(spread) > MaxReorderWindow) {
2643 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2644 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2645 << right->getDepth() << "\n");
2646 return left->getDepth() < right->getDepth();
2647 }
2648 }
2649
2650 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2651 int spread = (int)left->getHeight() - (int)right->getHeight();
2652 if (std::abs(spread) > MaxReorderWindow)
2653 return left->getHeight() > right->getHeight();
2654 }
2655
2656 return BURRSort(left, right, SPQ);
2657 }
2658
2659 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2660 SUnits = &sunits;
2661 // Add pseudo dependency edges for two-address nodes.
2662 if (!Disable2AddrHack)
2663 AddPseudoTwoAddrDeps();
2664 // Reroute edges to nodes with multiple uses.
2665 if (!TracksRegPressure && !SrcOrder)
2666 PrescheduleNodesWithMultipleUses();
2667 // Calculate node priorities.
2668 CalculateSethiUllmanNumbers();
2669
2670 // For single block loops, mark nodes that look like canonical IV increments.
2671 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2672 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2673 initVRegCycle(&sunits[i]);
2674 }
2675 }
2676 }
2677
2678 //===----------------------------------------------------------------------===//
2679 // Preschedule for Register Pressure
2680 //===----------------------------------------------------------------------===//
2681
2682 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2683 if (SU->isTwoAddress) {
2684 unsigned Opc = SU->getNode()->getMachineOpcode();
2685 const MCInstrDesc &MCID = TII->get(Opc);
2686 unsigned NumRes = MCID.getNumDefs();
2687 unsigned NumOps = MCID.getNumOperands() - NumRes;
2688 for (unsigned i = 0; i != NumOps; ++i) {
2689 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2690 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2691 if (DU->getNodeId() != -1 &&
2692 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2693 return true;
2694 }
2695 }
2696 }
2697 return false;
2698 }
2699
2700 /// canClobberReachingPhysRegUse - True if SU would clobber one of its
2701 /// successor's explicit physregs whose definition can reach DepSU.
2702 /// i.e. DepSU should not be scheduled above SU.
2703 static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2704 ScheduleDAGRRList *scheduleDAG,
2705 const TargetInstrInfo *TII,
2706 const TargetRegisterInfo *TRI) {
2707 const uint16_t *ImpDefs
2708 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2709 const uint32_t *RegMask = getNodeRegMask(SU->getNode());
2710 if(!ImpDefs && !RegMask)
2711 return false;
2712
2713 for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2714 SI != SE; ++SI) {
2715 SUnit *SuccSU = SI->getSUnit();
2716 for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2717 PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2718 if (!PI->isAssignedRegDep())
2719 continue;
2720
2721 if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
2722 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2723 return true;
2724
2725 if (ImpDefs)
2726 for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
2727 // Return true if SU clobbers this physical register use and the
2728 // definition of the register reaches from DepSU. IsReachable queries
2729 // a topological forward sort of the DAG (following the successors).
2730 if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2731 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2732 return true;
2733 }
2734 }
2735 return false;
2736 }
2737
2738 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2739 /// physical register defs.
2740 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2741 const TargetInstrInfo *TII,
2742 const TargetRegisterInfo *TRI) {
2743 SDNode *N = SuccSU->getNode();
2744 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2745 const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2746 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2747 for (const SDNode *SUNode = SU->getNode(); SUNode;
2748 SUNode = SUNode->getGluedNode()) {
2749 if (!SUNode->isMachineOpcode())
2750 continue;
2751 const uint16_t *SUImpDefs =
2752 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2753 const uint32_t *SURegMask = getNodeRegMask(SUNode);
2754 if (!SUImpDefs && !SURegMask)
2755 continue;
2756 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2757 EVT VT = N->getValueType(i);
2758 if (VT == MVT::Glue || VT == MVT::Other)
2759 continue;
2760 if (!N->hasAnyUseOfValue(i))
2761 continue;
2762 unsigned Reg = ImpDefs[i - NumDefs];
2763 if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
2764 return true;
2765 if (!SUImpDefs)
2766 continue;
2767 for (;*SUImpDefs; ++SUImpDefs) {
2768 unsigned SUReg = *SUImpDefs;
2769 if (TRI->regsOverlap(Reg, SUReg))
2770 return true;
2771 }
2772 }
2773 }
2774 return false;
2775 }
2776
2777 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2778 /// are not handled well by the general register pressure reduction
2779 /// heuristics. When presented with code like this:
2780 ///
2781 /// N
2782 /// / |
2783 /// / |
2784 /// U store
2785 /// |
2786 /// ...
2787 ///
2788 /// the heuristics tend to push the store up, but since the
2789 /// operand of the store has another use (U), this would increase
2790 /// the length of that other use (the U->N edge).
2791 ///
2792 /// This function transforms code like the above to route U's
2793 /// dependence through the store when possible, like this:
2794 ///
2795 /// N
2796 /// ||
2797 /// ||
2798 /// store
2799 /// |
2800 /// U
2801 /// |
2802 /// ...
2803 ///
2804 /// This results in the store being scheduled immediately
2805 /// after N, which shortens the U->N live range, reducing
2806 /// register pressure.
2807 ///
2808 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2809 // Visit all the nodes in topological order, working top-down.
2810 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2811 SUnit *SU = &(*SUnits)[i];
2812 // For now, only look at nodes with no data successors, such as stores.
2813 // These are especially important, due to the heuristics in
2814 // getNodePriority for nodes with no data successors.
2815 if (SU->NumSuccs != 0)
2816 continue;
2817 // For now, only look at nodes with exactly one data predecessor.
2818 if (SU->NumPreds != 1)
2819 continue;
2820 // Avoid prescheduling copies to virtual registers, which don't behave
2821 // like other nodes from the perspective of scheduling heuristics.
2822 if (SDNode *N = SU->getNode())
2823 if (N->getOpcode() == ISD::CopyToReg &&
2824 TargetRegisterInfo::isVirtualRegister
2825 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2826 continue;
2827
2828 // Locate the single data predecessor.
2829 SUnit *PredSU = nullptr;
2830 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2831 EE = SU->Preds.end(); II != EE; ++II)
2832 if (!II->isCtrl()) {
2833 PredSU = II->getSUnit();
2834 break;
2835 }
2836 assert(PredSU);
2837
2838 // Don't rewrite edges that carry physregs, because that requires additional
2839 // support infrastructure.
2840 if (PredSU->hasPhysRegDefs)
2841 continue;
2842 // Short-circuit the case where SU is PredSU's only data successor.
2843 if (PredSU->NumSuccs == 1)
2844 continue;
2845 // Avoid prescheduling to copies from virtual registers, which don't behave
2846 // like other nodes from the perspective of scheduling heuristics.
2847 if (SDNode *N = SU->getNode())
2848 if (N->getOpcode() == ISD::CopyFromReg &&
2849 TargetRegisterInfo::isVirtualRegister
2850 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2851 continue;
2852
2853 // Perform checks on the successors of PredSU.
2854 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2855 EE = PredSU->Succs.end(); II != EE; ++II) {
2856 SUnit *PredSuccSU = II->getSUnit();
2857 if (PredSuccSU == SU) continue;
2858 // If PredSU has another successor with no data successors, for
2859 // now don't attempt to choose either over the other.
2860 if (PredSuccSU->NumSuccs == 0)
2861 goto outer_loop_continue;
2862 // Don't break physical register dependencies.
2863 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2864 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2865 goto outer_loop_continue;
2866 // Don't introduce graph cycles.
2867 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2868 goto outer_loop_continue;
2869 }
2870
2871 // Ok, the transformation is safe and the heuristics suggest it is
2872 // profitable. Update the graph.
2873 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2874 << " next to PredSU #" << PredSU->NodeNum
2875 << " to guide scheduling in the presence of multiple uses\n");
2876 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2877 SDep Edge = PredSU->Succs[i];
2878 assert(!Edge.isAssignedRegDep());
2879 SUnit *SuccSU = Edge.getSUnit();
2880 if (SuccSU != SU) {
2881 Edge.setSUnit(PredSU);
2882 scheduleDAG->RemovePred(SuccSU, Edge);
2883 scheduleDAG->AddPred(SU, Edge);
2884 Edge.setSUnit(SU);
2885 scheduleDAG->AddPred(SuccSU, Edge);
2886 --i;
2887 }
2888 }
2889 outer_loop_continue:;
2890 }
2891 }
2892
2893 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2894 /// it as a def&use operand, add a pseudo control edge from it to the other
2895 /// node (if it won't create a cycle) so the two-address one will be scheduled
2896 /// first (lower in the schedule). If both nodes are two-address, favor the
2897 /// one that has a CopyToReg use (more likely to be a loop induction update).
2898 /// If both are two-address, but one is commutable while the other is not
2899 /// commutable, favor the one that's not commutable.
2900 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2901 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2902 SUnit *SU = &(*SUnits)[i];
2903 if (!SU->isTwoAddress)
2904 continue;
2905
2906 SDNode *Node = SU->getNode();
2907 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2908 continue;
2909
2910 bool isLiveOut = hasOnlyLiveOutUses(SU);
2911 unsigned Opc = Node->getMachineOpcode();
2912 const MCInstrDesc &MCID = TII->get(Opc);
2913 unsigned NumRes = MCID.getNumDefs();
2914 unsigned NumOps = MCID.getNumOperands() - NumRes;
2915 for (unsigned j = 0; j != NumOps; ++j) {
2916 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2917 continue;
2918 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2919 if (DU->getNodeId() == -1)
2920 continue;
2921 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2922 if (!DUSU) continue;
2923 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2924 E = DUSU->Succs.end(); I != E; ++I) {
2925 if (I->isCtrl()) continue;
2926 SUnit *SuccSU = I->getSUnit();
2927 if (SuccSU == SU)
2928 continue;
2929 // Be conservative. Ignore if nodes aren't at roughly the same
2930 // depth and height.
2931 if (SuccSU->getHeight() < SU->getHeight() &&
2932 (SU->getHeight() - SuccSU->getHeight()) > 1)
2933 continue;
2934 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2935 // constrains whatever is using the copy, instead of the copy
2936 // itself. In the case that the copy is coalesced, this
2937 // preserves the intent of the pseudo two-address heuristics.
2938 while (SuccSU->Succs.size() == 1 &&
2939 SuccSU->getNode()->isMachineOpcode() &&
2940 SuccSU->getNode()->getMachineOpcode() ==
2941 TargetOpcode::COPY_TO_REGCLASS)
2942 SuccSU = SuccSU->Succs.front().getSUnit();
2943 // Don't constrain non-instruction nodes.
2944 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2945 continue;
2946 // Don't constrain nodes with physical register defs if the
2947 // predecessor can clobber them.
2948 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2949 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2950 continue;
2951 }
2952 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2953 // these may be coalesced away. We want them close to their uses.
2954 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2955 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2956 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2957 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2958 continue;
2959 if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
2960 (!canClobber(SuccSU, DUSU) ||
2961 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2962 (!SU->isCommutable && SuccSU->isCommutable)) &&
2963 !scheduleDAG->IsReachable(SuccSU, SU)) {
2964 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2965 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2966 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
2967 }
2968 }
2969 }
2970 }
2971 }
2972
2973 //===----------------------------------------------------------------------===//
2974 // Public Constructor Functions
2975 //===----------------------------------------------------------------------===//
2976
2977 llvm::ScheduleDAGSDNodes *
2978 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2979 CodeGenOpt::Level OptLevel) {
2980 const TargetMachine &TM = IS->TM;
2981 const TargetInstrInfo *TII = TM.getInstrInfo();
2982 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2983
2984 BURegReductionPriorityQueue *PQ =
2985 new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
2986 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2987 PQ->setScheduleDAG(SD);
2988 return SD;
2989 }
2990
2991 llvm::ScheduleDAGSDNodes *
2992 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2993 CodeGenOpt::Level OptLevel) {
2994 const TargetMachine &TM = IS->TM;
2995 const TargetInstrInfo *TII = TM.getInstrInfo();
2996 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2997
2998 SrcRegReductionPriorityQueue *PQ =
2999 new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
3000 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
3001 PQ->setScheduleDAG(SD);
3002 return SD;
3003 }
3004
3005 llvm::ScheduleDAGSDNodes *
3006 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
3007 CodeGenOpt::Level OptLevel) {
3008 const TargetMachine &TM = IS->TM;
3009 const TargetInstrInfo *TII = TM.getInstrInfo();
3010 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
3011 const TargetLowering *TLI = IS->getTargetLowering();
3012
3013 HybridBURRPriorityQueue *PQ =
3014 new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3015
3016 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3017 PQ->setScheduleDAG(SD);
3018 return SD;
3019 }
3020
3021 llvm::ScheduleDAGSDNodes *
3022 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
3023 CodeGenOpt::Level OptLevel) {
3024 const TargetMachine &TM = IS->TM;
3025 const TargetInstrInfo *TII = TM.getInstrInfo();
3026 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
3027 const TargetLowering *TLI = IS->getTargetLowering();
3028
3029 ILPBURRPriorityQueue *PQ =
3030 new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3031 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3032 PQ->setScheduleDAG(SD);
3033 return SD;
3034 }
3035