//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAG class, which is used as the common
// base class for instruction schedulers. This encapsulates the scheduling DAG,
// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  class AliasAnalysis;
  class SUnit;
  class MachineConstantPool;
  class MachineFunction;
  class MachineRegisterInfo;
  class MachineInstr;
  struct MCSchedClassDesc;
  class TargetRegisterInfo;
  class ScheduleDAG;
  class SDNode;
  class TargetInstrInfo;
  class MCInstrDesc;
  class TargetMachine;
  class TargetRegisterClass;
  template<class Graph> class GraphWriter;

  /// SDep - Scheduling dependency. This represents one direction of an
  /// edge in the scheduling DAG.
  class SDep {
  public:
    /// Kind - These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// Dep - A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// Contents - A union discriminated by the dependence kind.
    union {
      /// Reg - For Data, Anti, and Output dependencies, the associated
      /// register. For Data dependencies that don't currently have a register
      /// assigned, this is set to zero.
      unsigned Reg;

      /// Order - Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// Latency - The time associated with this edge. Often this is just
    /// the value of the Latency field of the predecessor, however advanced
    /// models may provide additional information about specific edges.
    unsigned Latency;

  public:
    /// SDep - Construct a null SDep. This is only for use by container
    /// classes which require default constructors. SUnits may not
    /// have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// SDep - Construct an SDep with the specified values.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }
    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Return true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const {
      if (Dep != Other.Dep) return false;
      switch (Dep.getInt()) {
      case Data:
      case Anti:
      case Output:
        return Contents.Reg == Other.Contents.Reg;
      case Order:
        return Contents.OrdKind == Other.Contents.OrdKind;
      }
      llvm_unreachable("Invalid dependency kind!");
    }

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// getLatency - Return the latency value for this edge, which roughly
    /// means the minimum number of cycles that must elapse between the
    /// predecessor and the successor, given that they have this edge
    /// between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// setLatency - Set the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// getSUnit - Return the SUnit to which this edge points.
    SUnit *getSUnit() const {
      return Dep.getPointer();
    }

    /// setSUnit - Assign the SUnit to which this edge points.
    void setSUnit(SUnit *SU) {
      Dep.setPointer(SU);
    }

    /// getKind - Return an enum value representing the kind of the dependence.
    Kind getKind() const {
      return Dep.getInt();
    }

    /// isCtrl - Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// isNormalMemory - Test if this is an Order dependence between two
    /// memory accesses where both sides of the dependence access memory
    /// in non-volatile and fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// isBarrier - Test if this is an Order dependence that is marked
    /// as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// isMustAlias - Test if this is an Order dependence that is marked
    /// as "must alias", meaning that the SUnits at either end of the edge
    /// have a memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }
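
    // Example (illustrative sketch, not part of the interface): edges are
    // normally built with the constructors above and then attached with
    // SUnit::addPred(), declared later in this file. The SUnits named below
    // (DefSU, UseSU, StoreSU, LoadSU) are hypothetical nodes taken from a
    // ScheduleDAG's SUnits vector.
    //
    //   SDep DataEdge(DefSU, SDep::Data, /*Reg=*/0); // latency defaults to 1
    //   DataEdge.setLatency(3);        // refine using the machine model
    //   UseSU->addPred(DataEdge);      // UseSU now depends on DefSU
    //
    //   SDep ChainEdge(StoreSU, SDep::MayAliasMem);  // may-alias memory order
    //   LoadSU->addPred(ChainEdge);    // keeps the load after the store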

    /// isWeak - Test if this is a weak dependence. Weak dependencies are
    /// considered DAG edges for height computation and other heuristics, but do
    /// not force ordering. Breaking a weak edge may require the scheduler to
    /// compensate, for example by inserting a copy.
    bool isWeak() const {
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// isArtificial - Test if this is an Order dependence that is marked
    /// as "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// isCluster - Test if this is an Order dependence that is marked
    /// as "cluster", meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// isAssignedRegDep - Test if this is a Data dependence that is
    /// associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// getReg - Return the register associated with this edge. This is
    /// only valid on Data, Anti, and Output edges. On Data edges, this
    /// value may be zero, meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// setReg - Assign the associated register for this edge. This is
    /// only valid on Data, Anti, and Output edges. On Anti and Output
    /// edges, this value must not be zero. On Data edges, the value may
    /// be zero, which would mean that no specific register is associated
    /// with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }
  };

  template <>
  struct isPodLike<SDep> { static const bool value = true; };

  /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    enum : unsigned { BoundaryID = ~0u };

    SDNode *Node;                       // Representative node.
    MachineInstr *Instr;                // Alternatively, a MachineInstr.
  public:
    SUnit *OrigNode;                    // If not this, the node from which
                                        // this node was cloned.
                                        // (SD scheduling only)

    const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.

    // Preds/Succs - The SUnits before/after us in the graph.
    SmallVector<SDep, 4> Preds;  // All sunit predecessors.
    SmallVector<SDep, 4> Succs;  // All sunit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum;                   // Entry # of node in the node vector.
    unsigned NodeQueueId;               // Queue id of node.
    unsigned NumPreds;                  // # of SDep::Data preds.
    unsigned NumSuccs;                  // # of SDep::Data succs.
    unsigned NumPredsLeft;              // # of preds not scheduled.
    unsigned NumSuccsLeft;              // # of succs not scheduled.
    unsigned WeakPredsLeft;             // # of weak preds not scheduled.
    unsigned WeakSuccsLeft;             // # of weak succs not scheduled.
    unsigned short NumRegDefsLeft;      // # of reg defs with no scheduled use.
    unsigned short Latency;             // Node latency.
    bool isVRegCycle      : 1;          // May use and def the same vreg.
    bool isCall           : 1;          // Is a function call.
    bool isCallOp         : 1;          // Is a function call operand.
    bool isTwoAddress     : 1;          // Is a two-address instruction.
    bool isCommutable     : 1;          // Is a commutable instruction.
    bool hasPhysRegUses   : 1;          // Has physreg uses.
    bool hasPhysRegDefs   : 1;          // Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;        // Has any physreg defs, used or not.
    bool isPending        : 1;          // True once pending.
    bool isAvailable      : 1;          // True once available.
    bool isScheduled      : 1;          // True once scheduled.
    bool isScheduleHigh   : 1;          // True if preferable to schedule high.
    bool isScheduleLow    : 1;          // True if preferable to schedule low.
    bool isCloned         : 1;          // True if this node has been cloned.
    bool isUnbuffered     : 1;          // Uses an unbuffered resource.
    bool hasReservedResource : 1;       // Uses a reserved resource.
    Sched::Preference SchedulingPref;   // Scheduling preference.

  private:
    bool isDepthCurrent   : 1;          // True if Depth is current.
    bool isHeightCurrent  : 1;          // True if Height is current.
    unsigned Depth;                     // Node depth.
    unsigned Height;                    // Node height.
  public:
    unsigned TopReadyCycle; // Cycle relative to start when node is ready.
    unsigned BotReadyCycle; // Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
    const TargetRegisterClass *CopySrcRC;

    /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
    /// an SDNode and any nodes flagged to it.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
    /// a MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// SUnit - Construct a placeholder SUnit.
    SUnit()
      : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
        NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
        NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
        NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false),
        SchedulingPref(Sched::None), isDepthCurrent(false),
        isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
        BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}

    /// \brief Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have a
    /// valid ID. Consequently, always check for boundary nodes before accessing
    /// an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// setNode - Assign the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// getNode - Return the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// isInstr - Return true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// setInstr - Assign the instruction for the SUnit.
    /// This may be used during post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// getInstr - Return the representative MachineInstr for this SUnit.
    /// This may be used during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }
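
    // Example (illustrative sketch): code that must work in both scheduling
    // modes typically checks which representation is populated before touching
    // it. `SU` is a hypothetical SUnit pointer; printing to llvm::errs()
    // assumes llvm/Support/raw_ostream.h is available in the caller.
    //
    //   if (SU->isInstr())
    //     SU->getInstr()->print(errs());   // post-regalloc: a MachineInstr
    //   else if (SDNode *N = SU->getNode())
    //     N->dump();                       // pre-regalloc: an SDNode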

    /// addPred - This adds the specified edge as a pred of the current node if
    /// not already. It also adds the current node as a successor of the
    /// specified node.
    bool addPred(const SDep &D, bool Required = true);

    /// removePred - This removes the specified edge as a pred of the current
    /// node if it exists. It also removes the current node as a successor of
    /// the specified node.
    void removePred(const SDep &D);

    /// getDepth - Return the depth of this node, which is the length of the
    /// maximum path up to any node which has no predecessors.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// getHeight - Return the height of this node, which is the length of the
    /// maximum path down to any node which has no successors.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }

    /// setDepthToAtLeast - If NewDepth is greater than this node's
    /// depth value, set it to be the new depth value. This also
    /// recursively marks successor nodes dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// setHeightToAtLeast - If NewHeight is greater than this node's
    /// height value, set it to be the new height value. This also
    /// recursively marks predecessor nodes dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// setDepthDirty - Set a flag in this node to indicate that its
    /// stored Depth value will require recomputation the next time
    /// getDepth() is called.
    void setDepthDirty();

    /// setHeightDirty - Set a flag in this node to indicate that its
    /// stored Height value will require recomputation the next time
    /// getHeight() is called.
    void setHeightDirty();

    /// isPred - Test if node N is a predecessor of this node.
    bool isPred(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
        if (Preds[i].getSUnit() == N)
          return true;
      return false;
    }

    /// isSucc - Test if node N is a successor of this node.
    bool isSucc(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
        if (Succs[i].getSUnit() == N)
          return true;
      return false;
    }

    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// \brief Order this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dump(const ScheduleDAG *G) const;
    void dumpAll(const ScheduleDAG *G) const;
    void print(raw_ostream &O, const ScheduleDAG *G) const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };
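
  // Example (illustrative sketch): a minimal bottom-up priority comparison
  // built on the lazily computed height above. A real heuristic would also
  // consider register pressure, latency, and the Weak*/Num*Left counters.
  //
  //   static bool hasHigherPriority(const SUnit *A, const SUnit *B) {
  //     if (A->getHeight() != B->getHeight())
  //       return A->getHeight() > B->getHeight(); // longer path to exit first
  //     return A->NodeNum < B->NodeNum;           // fall back to source order
  //   }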

  //===--------------------------------------------------------------------===//
  /// SchedulingPriorityQueue - This interface is used to plug different
  /// priorities computation algorithms into the list scheduler. It implements
  /// the interface of a standard priority queue, where nodes are inserted in
  /// arbitrary order and returned in priority order. The computation of the
  /// priority and the representation of the queue are totally up to the
  /// implementation to decide.
  ///
  class SchedulingPriorityQueue {
    virtual void anchor();
    unsigned CurCycle;
    bool HasReadyFilter;
  public:
    SchedulingPriorityQueue(bool rf = false):
      CurCycle(0), HasReadyFilter(rf) {}
    virtual ~SchedulingPriorityQueue() {}

    virtual bool isBottomUp() const = 0;

    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    virtual void addNode(const SUnit *SU) = 0;
    virtual void updateNode(const SUnit *SU) = 0;
    virtual void releaseState() = 0;

    virtual bool empty() const = 0;

    bool hasReadyFilter() const { return HasReadyFilter; }

    virtual bool tracksRegPressure() const { return false; }

    virtual bool isReady(SUnit *) const {
      assert(!HasReadyFilter && "The ready filter must override isReady()");
      return true;
    }
    virtual void push(SUnit *U) = 0;

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
           E = Nodes.end(); I != E; ++I)
        push(*I);
    }

    virtual SUnit *pop() = 0;

    virtual void remove(SUnit *SU) = 0;

    virtual void dump(ScheduleDAG *) const {}

    /// scheduledNode - As each node is scheduled, this method is invoked. This
    /// allows the priority function to adjust the priority of related
    /// unscheduled nodes, for example.
    ///
    virtual void scheduledNode(SUnit *) {}

    virtual void unscheduledNode(SUnit *) {}

    void setCurCycle(unsigned Cycle) {
      CurCycle = Cycle;
    }

    unsigned getCurCycle() const {
      return CurCycle;
    }
  };
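
  // Example (illustrative sketch, hypothetical class): the smallest shape of a
  // concrete queue. It keeps ready nodes in a vector and pops the one that
  // compares best under some hasHigherPriority() predicate (see the sketch
  // after SUnit above). Tie-breaking, <algorithm> usage, and error handling
  // are deliberately minimal.
  //
  //   class TrivialQueue : public SchedulingPriorityQueue {
  //     std::vector<SUnit *> Ready;
  //   public:
  //     bool isBottomUp() const override { return true; }
  //     void initNodes(std::vector<SUnit> &) override {}
  //     void addNode(const SUnit *) override {}
  //     void updateNode(const SUnit *) override {}
  //     void releaseState() override { Ready.clear(); }
  //     bool empty() const override { return Ready.empty(); }
  //     void push(SUnit *SU) override { Ready.push_back(SU); }
  //     SUnit *pop() override {
  //       if (Ready.empty()) return nullptr;
  //       std::vector<SUnit *>::iterator Best = Ready.begin();
  //       for (std::vector<SUnit *>::iterator I = Best + 1, E = Ready.end();
  //            I != E; ++I)
  //         if (hasHigherPriority(*I, *Best))  // hypothetical predicate
  //           Best = I;
  //       SUnit *SU = *Best;
  //       Ready.erase(Best);
  //       return SU;
  //     }
  //     void remove(SUnit *SU) override {
  //       Ready.erase(std::find(Ready.begin(), Ready.end(), SU));
  //     }
  //   };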

  class ScheduleDAG {
  public:
    const TargetMachine &TM;            // Target processor
    const TargetInstrInfo *TII;         // Target instruction information
    const TargetRegisterInfo *TRI;      // Target processor register info
    MachineFunction &MF;                // Machine function
    MachineRegisterInfo &MRI;           // Virtual/real register map
    std::vector<SUnit> SUnits;          // The scheduling units.
    SUnit EntrySU;                      // Special node for the region entry.
    SUnit ExitSU;                       // Special node for the region exit.

#ifdef NDEBUG
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// clearDAG - clear the DAG state (between regions).
    void clearDAG();

    /// getInstrDesc - Return the MCInstrDesc of this SUnit.
    /// Return NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
    /// using 'dot'.
    ///
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit *SU) const = 0;

    /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
    /// of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// getDAGName - Return a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// addCustomGraphFeatures - Add custom features for a visualization of
    /// the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
    /// their state is consistent. Return the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  private:
    // Return the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };

  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
                                             SUnit, ptrdiff_t> {
    SUnit *Node;
    unsigned Operand;

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
  public:
    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    const SUnitIterator &operator=(const SUnitIterator &I) {
      assert(I.Node==Node && "Cannot assign iterators to two different nodes!");
      Operand = I.Operand;
      return *this;
    }

    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() {                // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) {              // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    unsigned getOperand() const { return Operand; }
    const SUnit *getNode() const { return Node; }
    /// isCtrlDep - Test if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };

  template <> struct GraphTraits<SUnit*> {
    typedef SUnit NodeType;
    typedef SUnitIterator ChildIteratorType;
    static inline NodeType *getEntryNode(SUnit *N) { return N; }
    static inline ChildIteratorType child_begin(NodeType *N) {
      return SUnitIterator::begin(N);
    }
    static inline ChildIteratorType child_end(NodeType *N) {
      return SUnitIterator::end(N);
    }
  };

  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef std::vector<SUnit>::iterator nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return G->SUnits.begin();
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return G->SUnits.end();
    }
  };
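
  // Example (illustrative sketch): these specializations let the generic graph
  // utilities in llvm/ADT walk the DAG through each node's Preds edges. The
  // root node `Root` is hypothetical, `visit` is a hypothetical per-node
  // callback, and the caller is assumed to include
  // llvm/ADT/DepthFirstIterator.h.
  //
  //   for (df_iterator<SUnit *> I = df_begin(Root), E = df_end(Root);
  //        I != E; ++I)
  //     visit(*I);   // *I is a SUnit*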

  /// ScheduleDAGTopologicalSort is a class that computes a topological
  /// ordering for SUnits and provides methods for dynamically updating
  /// the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  ///
  class ScheduleDAGTopologicalSort {
    /// SUnits - A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    /// Index2Node - Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Node2Index - Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// Visited - a set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// DFS - make a DFS traversal and mark all nodes affected by the
    /// edge insertion. These nodes will later get new topological indexes
    /// by means of the Shift method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Shift - reassign topological indexes for the nodes in the DAG
    /// to preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Allocate - assign the topological index to the node n.
    void Allocate(int n, int index);

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// InitDAGTopologicalSorting - create the initial topological
    /// ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// IsReachable - Checks if SU is reachable from TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// AddPred - Updates the topological ordering to accommodate an edge
    /// to be added from SUnit X to SUnit Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// RemovePred - Updates the topological ordering to accommodate an
    /// edge to be removed from the specified node N from the predecessors
    /// of the current node M.
    void RemovePred(SUnit *M, SUnit *N);

    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
} // end namespace llvm

#endif