/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIMachineScheduler.cpp
    198  NodeNum2Index[SU->NodeNum] = SUnits.size();  in addUnit()
    199  SUnits.push_back(SU);  in addUnit()
    293  for (SUnit* SU : SUnits) {  in fastSchedule()
    411  for (SUnit* SU : SUnits) {  in schedule()
    429  assert(SUnits.size() == ScheduledSUnits.size() &&  in schedule()
    431  for (SUnit* SU : SUnits) {  in schedule()
    441  for (SUnit* SU : SUnits) {  in undoSchedule()
    448  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);  in undoSchedule()
    487  if (SuccSU->NodeNum >= DAG->SUnits.size())  in releaseSuccessors()
    513  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);  in nodeScheduled()
    [all …]
|
D | GCNMinRegStrategy.cpp
    66   void initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits);
    86   void GCNMinRegScheduler::initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits) {  in initNumPreds() argument
    87   NumPreds.resize(SUnits.size());  in initNumPreds()
    88   for (unsigned I = 0; I < SUnits.size(); ++I)  in initNumPreds()
    89   NumPreds[I] = SUnits[I].NumPredsLeft;  in initNumPreds()
    234  const auto &SUnits = DAG.SUnits;  in schedule() local
    236  Schedule.reserve(SUnits.size());  in schedule()
    238  initNumPreds(SUnits);  in schedule()
    271  assert(SUnits.size() == Schedule.size());  in schedule()
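The initNumPreds() hits above show a recurring idiom in these schedulers: a parallel array, indexed by a unit's position in SUnits, that mirrors one field (here NumPredsLeft) so the heuristic can decrement counts without touching the DAG. Below is a minimal self-contained sketch of that idiom, not the LLVM code: Unit is a stand-in for llvm::SUnit, and the vector is returned by value (the original fills a NumPreds member instead).

    #include <vector>

    // Stand-in for llvm::SUnit with only the field the sketch needs.
    struct Unit { unsigned NumPredsLeft = 0; };

    // Build the parallel predecessor-count array, indexed by position in SUnits,
    // mirroring the loop in the GCNMinRegScheduler::initNumPreds() hits.
    std::vector<unsigned> initNumPreds(const std::vector<Unit> &SUnits) {
      std::vector<unsigned> NumPreds(SUnits.size());
      for (unsigned I = 0; I < SUnits.size(); ++I)
        NumPreds[I] = SUnits[I].NumPredsLeft;
      return NumPreds;
    }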
|
D | GCNILPSched.cpp
    293  auto &SUnits = const_cast<ScheduleDAG&>(DAG).SUnits;  in schedule() local
    296  SUSavedCopy.resize(SUnits.size());  in schedule()
    300  for (const SUnit &SU : SUnits)  in schedule()
    303  SUNumbers.assign(SUnits.size(), 0);  in schedule()
    304  for (const SUnit &SU : SUnits)  in schedule()
    314  Schedule.reserve(SUnits.size());  in schedule()
    346  assert(SUnits.size() == Schedule.size());  in schedule()
    351  for (auto &SU : SUnits)  in schedule()
|
D | AMDGPUSubtarget.cpp
    768  for (SUnit &SU : DAG->SUnits) {  in apply()
    889  if (!TSchedModel || DAG->SUnits.empty())  in apply()
    896  auto LastSALU = DAG->SUnits.begin();  in apply()
    897  auto E = DAG->SUnits.end();  in apply()
    899  for (SUnit &SU : DAG->SUnits) {  in apply()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | LatencyPriorityQueue.h
    34   std::vector<SUnit> *SUnits;  variable
    53   SUnits = &sunits;  in initNodes()
    54   NumNodesSolelyBlocking.resize(SUnits->size(), 0);  in initNodes()
    58   NumNodesSolelyBlocking.resize(SUnits->size(), 0);  in addNode()
    65   SUnits = nullptr;  in releaseState()
    69   assert(NodeNum < (*SUnits).size());  in getLatency()
    70   return (*SUnits)[NodeNum].getHeight();  in getLatency()
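These hits (and the near-identical ones in ResourcePriorityQueue.h below) show that the scheduling queues never own the units: initNodes() stores a raw pointer to the DAG's SUnits vector, getLatency() indexes it by NodeNum, and releaseState() drops the pointer at the end of the region. A minimal self-contained sketch of that lifetime pattern, using a stand-in Unit type rather than llvm::SUnit:

    #include <cassert>
    #include <vector>

    // Stand-in for llvm::SUnit with just the query used below.
    struct Unit {
      unsigned Height = 0;
      unsigned getHeight() const { return Height; }
    };

    // Non-owning view over the DAG's unit array, valid for one scheduling region.
    class LatencyQueueSketch {
      std::vector<Unit> *SUnits = nullptr;

    public:
      void initNodes(std::vector<Unit> &sunits) { SUnits = &sunits; }  // borrow the DAG's vector
      void releaseState() { SUnits = nullptr; }                        // give it back

      unsigned getLatency(unsigned NodeNum) const {
        assert(SUnits && NodeNum < SUnits->size());
        return (*SUnits)[NodeNum].getHeight();
      }
    };

The matching call site is visible in the ScheduleDAGVLIW.cpp entry further down: AvailableQueue->initNodes(SUnits) in Schedule().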
|
D | ResourcePriorityQueue.h
    39   std::vector<SUnit> *SUnits;  variable
    84   NumNodesSolelyBlocking.resize(SUnits->size(), 0);  in addNode()
    90   SUnits = nullptr;  in releaseState()
    94   assert(NodeNum < (*SUnits).size());  in getLatency()
    95   return (*SUnits)[NodeNum].getHeight();  in getLatency()
|
D | ScheduleDAGInstrs.h
    386  const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];  in newSUnit()
    388  SUnits.emplace_back(MI, (unsigned)SUnits.size());  in newSUnit()
    389  assert((Addr == nullptr || Addr == &SUnits[0]) &&  in newSUnit()
    391  return &SUnits.back();  in newSUnit()
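Read together, these four hits are the body of newSUnit(): capture the address of the first element, append the new unit with its index as NodeNum, and assert that the vector did not reallocate, since other code holds raw SUnit* pointers into it. The same shape appears in the ScheduleDAGSDNodes.cpp entry below, and the reserve() hits in ScheduleDAGInstrs.cpp and ScheduleDAGSDNodes.cpp are what make the assert hold. A self-contained sketch of the pattern (Unit and MiniDAG are illustrative stand-ins, not the LLVM types):

    #include <cassert>
    #include <vector>

    struct Unit {
      unsigned NodeNum;
      explicit Unit(unsigned Num) : NodeNum(Num) {}
    };

    struct MiniDAG {
      std::vector<Unit> SUnits;

      // Append a unit and assert the buffer did not move, because callers keep
      // raw Unit* into SUnits for the lifetime of the scheduling region.
      Unit *newSUnit() {
        const Unit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
        SUnits.emplace_back((unsigned)SUnits.size());
        assert(Addr == nullptr || Addr == &SUnits[0]);
        return &SUnits.back();
      }
    };

    int main() {
      MiniDAG DAG;
      DAG.SUnits.reserve(16);  // mirrors the SUnits.reserve(...) hits in the .cpp entries
      Unit *A = DAG.newSUnit();
      Unit *B = DAG.newSUnit();
      return (A->NodeNum == 0 && B->NodeNum == 1) ? 0 : 1;
    }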
|
D | ScheduleDAG.h
    509  virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    562  std::vector<SUnit> SUnits;  ///< The scheduling units.  variable
    678  return nodes_iterator(G->SUnits.begin());
    681  return nodes_iterator(G->SUnits.end());
    691  std::vector<SUnit> &SUnits;
    725  ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
|
D | MachinePipeliner.h
    161  std::vector<SUnit> &SUnits;  variable
    173  : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {  in Circuits()
    186  B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());  in reset()
    203  RegClassInfo(rci), II_setByPragma(II), Topo(SUnits, &ExitSU) {  in SwingSchedulerDAG()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonSubtarget.cpp
    129  for (SUnit &SU : DAG->SUnits) {  in apply()
    142  for (SUnit &SU : DAG->SUnits) {  in apply()
    205  for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {  in apply()
    207  if (DAG->SUnits[su].getInstr()->isCall())  in apply()
    208  LastSequentialCall = &DAG->SUnits[su];  in apply()
    210  else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)  in apply()
    211  DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));  in apply()
    214  shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))  in apply()
    215  DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));  in apply()
    231  const MachineInstr *MI = DAG->SUnits[su].getInstr();  in apply()
    [all …]
|
D | HexagonMachineScheduler.cpp
    211  for (unsigned su = 0, e = SUnits.size(); su != e;  in schedule()
    212  ++su) if (SUnits[su].getHeight() > maxH) maxH =  in schedule()
    213  SUnits[su].getHeight();  in schedule()
    216  for (unsigned su = 0, e = SUnits.size(); su != e;  in schedule()
    217  ++su) if (SUnits[su].getDepth() > maxD) maxD =  in schedule()
    218  SUnits[su].getDepth();  in schedule()
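The six hits above are really just two statements split across source lines: schedule() scans every unit to find the maximum height and maximum depth, i.e. the critical-path bounds of the region. Written out over stand-in types (not the LLVM code), the idea is:

    #include <algorithm>
    #include <vector>

    // Stand-in for llvm::SUnit; getHeight()/getDepth() are the longest paths
    // below/above the unit in the dependence graph.
    struct Unit {
      unsigned Height = 0, Depth = 0;
      unsigned getHeight() const { return Height; }
      unsigned getDepth() const { return Depth; }
    };

    // Same computation as the two loops in the hits, one pass over SUnits.
    void criticalPathBounds(const std::vector<Unit> &SUnits,
                            unsigned &maxH, unsigned &maxD) {
      maxH = maxD = 0;
      for (const Unit &SU : SUnits) {
        maxH = std::max(maxH, SU.getHeight());
        maxD = std::max(maxD, SU.getDepth());
      }
    }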
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | ScheduleDAGVLIW.cpp
    100  AvailableQueue->initNodes(SUnits);  in Schedule()
    173  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in listScheduleTopDown()
    175  if (SUnits[i].Preds.empty()) {  in listScheduleTopDown()
    176  AvailableQueue->push(&SUnits[i]);  in listScheduleTopDown()
    177  SUnits[i].isAvailable = true;  in listScheduleTopDown()
    184  Sequence.reserve(SUnits.size());  in listScheduleTopDown()
|
D | ScheduleDAGSDNodes.cpp
    71   if (!SUnits.empty())  in newSUnit()
    72   Addr = &SUnits[0];  in newSUnit()
    74   SUnits.emplace_back(N, (unsigned)SUnits.size());  in newSUnit()
    75   assert((Addr == nullptr || Addr == &SUnits[0]) &&  in newSUnit()
    77   SUnits.back().OrigNode = &SUnits.back();  in newSUnit()
    78   SUnit *SU = &SUnits.back();  in newSUnit()
    337  SUnits.reserve(NumNodes * 2);  in BuildSchedUnits()
    430  SUnit *SrcSU = &SUnits[SrcN->getNodeId()];  in BuildSchedUnits()
    443  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {  in AddSchedEdges()
    444  SUnit *SU = &SUnits[su];  in AddSchedEdges()
    [all …]
|
D | ScheduleDAGRRList.cpp
    194   Topo(SUnits, nullptr) {  in ScheduleDAGRRList()
    278   unsigned NumSUnits = SUnits.size();  in CreateNewSUnit()
    288   unsigned NumSUnits = SUnits.size();  in CreateClone()
    376   AvailableQueue->initNodes(SUnits);  in Schedule()
    587   SUnit *Def = &SUnits[N->getNodeId()];  in ReleasePredecessors()
    1005  LoadSU = &SUnits[LoadNode->getNodeId()];  in TryUnfoldSU()
    1023  NewSU = &SUnits[N->getNodeId()];  in TryUnfoldSU()
    1602  if (!SUnits.empty()) {  in ListScheduleBottomUp()
    1603  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];  in ListScheduleBottomUp()
    1611  Sequence.reserve(SUnits.size());  in ListScheduleBottomUp()
    [all …]
|
D | ResourcePriorityQueue.cpp
    164  SUnits = &sunits;  in initNodes()
    165  NumNodesSolelyBlocking.resize(SUnits->size(), 0);  in initNodes()
    167  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {  in initNodes()
    168  SUnit *SU = &(*SUnits)[i];  in initNodes()
|
D | ScheduleDAGFast.cpp
    270  LoadSU = &SUnits[LoadNode->getNodeId()];  in CopyAndMoveSuccessors()
    531  if (!SUnits.empty()) {  in ListScheduleBottomUp()
    532  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];  in ListScheduleBottomUp()
    542  Sequence.reserve(SUnits.size());  in ListScheduleBottomUp()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | ScheduleDAG.cpp
    65   SUnits.clear();  in clearDAG()
    393  for (const SUnit &SUnit : SUnits) {  in VerifyScheduledDAG()
    434  return SUnits.size() - DeadNodes;  in VerifyScheduledDAG()
    470  unsigned DAGSize = SUnits.size();  in InitDAGTopologicalSorting()
    480  for (SUnit &SU : SUnits) {  in InitDAGTopologicalSorting()
    514  for (SUnit &SU : SUnits) {  in InitDAGTopologicalSorting()
    573  WorkList.reserve(SUnits.size());  in DFS()
    613  WorkList.reserve(SUnits.size());  in GetSubGraph()
    646  VisitedBack.resize(SUnits.size());  in GetSubGraph()
    741  : SUnits(sunits), ExitSU(exitsu) {}  in ScheduleDAGTopologicalSort()
|
D | MachinePipeliner.cpp
    664   for (auto &SU : SUnits) {  in addLoopCarriedDependences()
    760   for (SUnit &I : SUnits) {  in updatePhiDependences()
    843   for (SUnit &I : SUnits) {  in changeDependences()
    1113  static void swapAntiDependences(std::vector<SUnit> &SUnits) {  in swapAntiDependences() argument
    1115  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in swapAntiDependences()
    1116  SUnit *SU = &SUnits[i];  in swapAntiDependences()
    1143  BitVector Added(SUnits.size());  in createAdjacencyStructure()
    1145  for (int i = 0, e = SUnits.size(); i != e; ++i) {  in createAdjacencyStructure()
    1148  for (auto &SI : SUnits[i].Succs) {  in createAdjacencyStructure()
    1174  for (auto &PI : SUnits[i].Preds) {  in createAdjacencyStructure()
    [all …]
|
D | PostRASchedulerList.cpp
    397  AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,  in schedule()
    419  AvailableQueue.initNodes(SUnits);  in schedule()
    536  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in ListScheduleTopDown()
    538  if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {  in ListScheduleTopDown()
    539  AvailableQueue.push(&SUnits[i]);  in ListScheduleTopDown()
    540  SUnits[i].isAvailable = true;  in ListScheduleTopDown()
    551  Sequence.reserve(SUnits.size());  in ListScheduleTopDown()
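The ListScheduleTopDown() hits show how top-down list scheduling is bootstrapped: every unit whose predecessor count is already zero is marked available and pushed onto the ready queue before the main loop starts (the ScheduleDAGVLIW.cpp entry above has the same shape, testing Preds.empty() instead). A self-contained sketch of that seeding step, with stand-in types in place of llvm::SUnit and the real priority queue:

    #include <queue>
    #include <vector>

    // Stand-in for llvm::SUnit with only the fields the seeding loop touches.
    struct Unit {
      unsigned NumPredsLeft = 0;
      bool isAvailable = false;
    };

    // Push every unit with no unscheduled predecessors onto the ready queue,
    // mirroring the loop over SUnits in ListScheduleTopDown().
    void seedReadyQueue(std::vector<Unit> &SUnits,
                        std::queue<Unit *> &AvailableQueue) {
      for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
        if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
          AvailableQueue.push(&SUnits[i]);
          SUnits[i].isAvailable = true;
        }
      }
    }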
|
D | MacroFusion.cpp
    125  for (SUnit &SU : DAG.SUnits) {  in fuseInstructionPair()
    158  for (SUnit &ISU : DAG->SUnits)  in apply()
|
D | CriticalAntiDepBreaker.cpp
    447  BreakAntiDependencies(const std::vector<SUnit> &SUnits,  in BreakAntiDependencies() argument
    454  if (SUnits.empty()) return 0;  in BreakAntiDependencies()
    464  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in BreakAntiDependencies()
    465  const SUnit *SU = &SUnits[i];  in BreakAntiDependencies()
|
D | AggressiveAntiDepBreaker.cpp
    754  const std::vector<SUnit> &SUnits,  in BreakAntiDependencies() argument
    766  if (SUnits.empty()) return 0;  in BreakAntiDependencies()
    773  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in BreakAntiDependencies()
    774  const SUnit *SU = &SUnits[i];  in BreakAntiDependencies()
    785  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {  in BreakAntiDependencies()
    786  const SUnit *SU = &SUnits[i];  in BreakAntiDependencies()
|
D | ScheduleDAGInstrs.cpp
    116   … Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) {  in ScheduleDAGInstrs()
    568   SUnits.reserve(NumRegionInstrs);  in initSUnits()
    746   PDiffs->init(SUnits.size());  in buildSchedGraph()
    1065  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];  in reduceHugeMemNodeMaps()
    1171  for (const SUnit &SU : SUnits)  in dump()
    1447  void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {  in compute() argument
    1452  for (const SUnit &SU : SUnits) {  in compute()
|
D | AntiDepBreaker.h
    43  virtual unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
|
D | CriticalAntiDepBreaker.h
    82  unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
|