/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(TARGAARCH64) && TARGAARCH64
#include "aarch64_schedule.h"
#elif defined(TARGRISCV64) && TARGRISCV64
#include "riscv64_schedule.h"
#endif
#if defined(TARGARM32) && TARGARM32
#include "arm32_schedule.h"
#endif
#include "cg.h"
#include "optimize_common.h"

#undef PRESCHED_DEBUG

namespace maplebe {
/* pressure threshold; pressure below this value will not lead to spilling */
static constexpr int PRESSURE_STANDARD = 27;
/* optimistic (brute-force) scheduling option */
static constexpr bool OPTIMISTIC_SCHEDULING = false;
/* whether to cap the number of series explored by brute-force scheduling */
static constexpr bool BRUTE_MAXIMUM_LIMIT = true;
/* maximum number of series explored by brute-force scheduling */
static constexpr int SCHEDULING_MAXIMUM_COUNT = 20000;

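/*
 * RegPressureSchedule reorders instructions inside a single BB to reduce register pressure
 * rather than latency. The usual flow (see DoScheduling) is: measure the pressure of the
 * original series; if it is already below PRESSURE_STANDARD, keep the original order;
 * otherwise reschedule with the heuristic list scheduler (or, when enabled, the brute-force
 * scheduler) and keep the new order only if it actually lowers the measured pressure.
 */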
/* ---- RegPressureSchedule function ---- */
void RegPressureSchedule::InitBBInfo(BB &b, MemPool &memPool, const MapleVector<DepNode *> &nodes)
{
    bb = &b;
    liveReg.clear();
    scheduledNode.clear();
    readyList.clear();
    maxPriority = 0;
    maxPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    curPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    physicalRegNum = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    for (auto node : nodes) {
        node->SetState(kNormal);
    }
}

/* return the register type according to the register number */
RegType RegPressureSchedule::GetRegisterType(regno_t reg) const
{
    return cgFunc.GetRegisterType(reg);
}

/* get the number of physical registers in each register class */
void RegPressureSchedule::BuildPhyRegInfo(const std::vector<int32> &regNumVec) const
{
    FOR_ALL_REGCLASS(i)
    {
        physicalRegNum[i] = regNumVec[i];
    }
}

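/*
 * Split points partition a BB so that each region is scheduled independently (see
 * PartialScheduling). At present they are only created for return BBs, which presumably
 * keeps the trailing instructions that set up the return value at the end of the block.
 */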
/* Initialize the pre-scheduling split points in the BB */
void RegPressureSchedule::InitPartialSplitters(const MapleVector<DepNode *> &nodes)
{
    bool addFirstAndLastNodeIndex = false;
    constexpr uint32 kSecondLastNodeIndexFromBack = 2;
    constexpr uint32 kLastNodeIndexFromBack = 1;
    constexpr uint32 kFirstNodeIndex = 0;
    constexpr uint32 kMinimumBBSize = 2;
    /* Add a split point for the last instruction in a return BB */
    if (bb->GetKind() == BB::kBBReturn && nodes.size() > kMinimumBBSize) {
        splitterIndexes.emplace_back(nodes.size() - kSecondLastNodeIndexFromBack);
        addFirstAndLastNodeIndex = true;
    }
    /* Add the first and last nodes as split points if needed */
    if (addFirstAndLastNodeIndex) {
        CHECK_FATAL(nodes.size() > 0, "must not be zero");
        splitterIndexes.emplace_back(nodes.size() - kLastNodeIndexFromBack);
        splitterIndexes.emplace_back(kFirstNodeIndex);
    }
    std::sort(splitterIndexes.begin(), splitterIndexes.end(), std::less<int> {});
}

/* Initialize register pressure information according to the BB's live-in data.
 * Initialize each node's valid preds size.
 */
void RegPressureSchedule::Init(const MapleVector<DepNode *> &nodes)
{
    readyList.clear();
    scheduledNode.clear();
    liveReg.clear();
    liveInRegNO.clear();
    liveOutRegNO.clear();
    liveInRegNO = bb->GetLiveInRegNO();
    liveOutRegNO = bb->GetLiveOutRegNO();

    FOR_ALL_REGCLASS(i)
    {
        curPressure[i] = 0;
        maxPressure[i] = 0;
    }

    for (auto *node : nodes) {
        /* calculate the register pressure of the node's uses */
        for (auto &useReg : node->GetUseRegnos()) {
            CalculatePressure(*node, useReg, false);
        }

        /* calculate the register pressure of the node's defs */
        size_t i = 0;
        for (auto &defReg : node->GetDefRegnos()) {
            CalculatePressure(*node, defReg, true);
            RegType regType = GetRegisterType(defReg);
            /* if there is no use list, the register is only defined, never used */
            if (node->GetRegDefs(i) == nullptr && liveOutRegNO.find(defReg) == liveOutRegNO.end()) {
                node->IncDeadDefByIndex(regType);
            }
            ++i;
        }
        /* Calculate pred size of the node */
        CalculatePredSize(*node);
    }

    DepNode *firstNode = nodes.front();
    readyList.emplace_back(firstNode);
    firstNode->SetState(kReady);
    scheduledNode.reserve(nodes.size());
    constexpr size_t readyListSize = 10;
    readyList.reserve(readyListSize);
}

void RegPressureSchedule::SortReadyList()
{
    std::sort(readyList.begin(), readyList.end(), DepNodePriorityCmp);
}

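/*
 * Priority comparison used to sort the readyList. Candidates are compared, in order, by:
 * priority (raised by UpdatePriority), call count when both nodes increase pressure,
 * maxDepth + near, near, call count, fewer successors, whether the node touches a physical
 * register, and finally the original insn id.
 */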
/* return true if node1 should be scheduled first. */
bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2)
{
    CHECK_NULL_FATAL(node1);
    CHECK_NULL_FATAL(node2);
    int32 priority1 = node1->GetPriority();
    int32 priority2 = node2->GetPriority();
    if (priority1 != priority2) {
        return priority1 > priority2;
    }

    int32 numCall1 = node1->GetNumCall();
    int32 numCall2 = node2->GetNumCall();
    if (node1->GetIncPressure() && node2->GetIncPressure()) {
        if (numCall1 != numCall2) {
            return numCall1 > numCall2;
        }
    }

    int32 near1 = node1->GetNear();
    int32 near2 = node2->GetNear();
    int32 depthS1 = node1->GetMaxDepth() + near1;
    int32 depthS2 = node2->GetMaxDepth() + near2;
    if (depthS1 != depthS2) {
        return depthS1 > depthS2;
    }

    if (near1 != near2) {
        return near1 > near2;
    }

    if (numCall1 != numCall2) {
        return numCall1 > numCall2;
    }

    size_t succsSize1 = node1->GetSuccs().size();
    size_t succsSize2 = node2->GetSuccs().size();
    if (succsSize1 != succsSize2) {
        return succsSize1 < succsSize2;
    }

    if (node1->GetHasPreg() != node2->GetHasPreg()) {
        return node1->GetHasPreg();
    }

    return node1->GetInsn()->GetId() < node2->GetInsn()->GetId();
}

/* set a node's incPressure to true when the pressure of a register class increases */
void RegPressureSchedule::ReCalculateDepNodePressure(const DepNode &node) const
{
    /* if any type of register pressure increases, set incPressure to true. */
    auto &pressures = node.GetPressure();
    node.SetIncPressure(pressures[kRegisterInt] > 0);
}

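/*
 * maxDepth is the length of the longest dependence chain from a node to the end of the BB,
 * and numCall counts the calls reachable through its successors. Both are computed bottom-up:
 * walking the nodes in reverse order guarantees every successor is finished before any of its
 * predecessors is visited.
 */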
/* calculate the maxDepth of every node in nodes. */
void RegPressureSchedule::CalculateMaxDepth(const MapleVector<DepNode *> &nodes) const
{
    /* from the last node to the first node. */
    for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) {
        /* init call count */
        if ((*it)->GetInsn()->IsCall()) {
            (*it)->SetNumCall(1);
        }
        /* traverse each successor of it. */
        for (auto succ : (*it)->GetSuccs()) {
            DepNode &to = succ->GetTo();
            if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) {
                (*it)->SetMaxDepth(to.GetMaxDepth() + 1);
            }

            if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) {
                (*it)->SetNumCall(to.GetNumCall() + 1);
            } else if ((*it)->GetNumCall() < to.GetNumCall()) {
                (*it)->SetNumCall(to.GetNumCall());
            }
        }
    }
}

/* calculate the near value of every successor of the node. */
void RegPressureSchedule::CalculateNear(const DepNode &node) const
{
    for (auto succ : node.GetSuccs()) {
        DepNode &to = succ->GetTo();
        if (succ->GetDepType() == kDependenceTypeTrue && to.GetNear() < node.GetNear() + 1) {
            to.SetNear(node.GetNear() + 1);
        }
    }
}

/* return true if this is the last use of regNO. */
bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO)
{
    size_t i = 0;
    for (auto reg : node.GetUseRegnos()) {
        if (reg == regNO) {
            break;
        }
        ++i;
    }
    RegList *regList = node.GetRegUses(i);

    /*
     * Apart from this node, if regNO's regList still contains an insn that has not been
     * scheduled, then this is not the last use of regNO; return false.
     */
    while (regList != nullptr) {
        CHECK_NULL_FATAL(regList->insn);
        DepNode *useNode = regList->insn->GetDepNode();
        DEBUG_ASSERT(useNode != nullptr, "get depend node failed in RegPressureSchedule::IsLastUse");
        if ((regList->insn != node.GetInsn()) && (useNode->GetState() != kScheduled)) {
            return false;
        }
        regList = regList->next;
    }
    return true;
}

void RegPressureSchedule::CalculatePressure(const DepNode &node, regno_t reg, bool def) const
{
    RegType regType = GetRegisterType(reg);
    /* defining a register increases register pressure. */
    if (def) {
        node.IncPressureByIndex(regType);
    } else {
        /* the last use of a register decreases register pressure. */
        if (IsLastUse(node, reg)) {
            node.DecPressureByIndex(regType);
        }
    }
}

/* update live reg information. */
void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool def)
{
    if (def) {
        if (liveReg.find(reg) == liveReg.end()) {
            (void)liveReg.insert(reg);
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "Add new def R" << reg << " to live reg list \n";
#endif
        }
        /* if there is no use list, the register is only defined, never used */
        size_t i = 1;
        for (auto defReg : node.GetDefRegnos()) {
            if (defReg == reg) {
                break;
            }
            ++i;
        }
        if (node.GetRegDefs(i) == nullptr && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "Remove dead def " << reg << " from live reg list \n";
#endif
            liveReg.erase(reg);
        } else if (node.GetRegDefs(i) != nullptr) {
#ifdef PRESCHED_DEBUG
            auto regList = node.GetRegDefs(i);
            LogInfo::MapleLogger() << i << " Live def, dump use insn here \n";
            while (regList != nullptr) {
                node.GetRegDefs(i)->insn->Dump();
                regList = regList->next;
            }
#endif
        }
    } else {
        if (IsLastUse(node, reg)) {
            if (liveReg.find(reg) != liveReg.end() && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
#ifdef PRESCHED_DEBUG
                LogInfo::MapleLogger() << "Remove last use R" << reg << " from live reg list\n";
#endif
                liveReg.erase(reg);
            }
        }
    }
}

/* update register pressure information. */
void RegPressureSchedule::UpdateBBPressure(const DepNode &node)
{
    size_t idx = 0;
    for (auto &reg : node.GetUseRegnos()) {
#ifdef PRESCHED_DEBUG
        LogInfo::MapleLogger() << "Use Reg : R" << reg << "\n";
        UpdateLiveReg(node, reg, false);
        if (liveReg.find(reg) == liveReg.end()) {
            ++idx;
            continue;
        }
#endif

        /* find all insns that use the reg; if an insn is the last use of the reg, decrease its pressure by 1 */
        RegList *regList = node.GetRegUses(idx);

        while (regList != nullptr) {
            CHECK_NULL_FATAL(regList->insn);
            DepNode *useNode = regList->insn->GetDepNode();
            if (useNode->GetState() == kScheduled) {
                regList = regList->next;
                continue;
            }

            if (IsLastUse(*useNode, reg)) {
                RegType regType = GetRegisterType(reg);
                useNode->DecPressureByIndex(regType);
            }
            break;
        }
        ++idx;
    }

#ifdef PRESCHED_DEBUG
    for (auto &defReg : node.GetDefRegnos()) {
        UpdateLiveReg(node, defReg, true);
    }
#endif

    const auto &pressures = node.GetPressure();
    const auto &deadDefNum = node.GetDeadDefNum();
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "\nnode's pressure: ";
    for (auto pressure : pressures) {
        LogInfo::MapleLogger() << pressure << " ";
    }
    LogInfo::MapleLogger() << "\n";
#endif

    FOR_ALL_REGCLASS(i)
    {
        curPressure[i] += pressures[i];
        curPressure[i] -= deadDefNum[i];
        if (curPressure[i] > maxPressure[i]) {
            maxPressure[i] = curPressure[i];
        }
    }
}

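/*
 * When the node preferred by ChooseNode cannot be scheduled yet, HeuristicScheduling raises
 * its priority and, through the worklist below, the priority of every unscheduled ancestor,
 * so that the blocking predecessors float to the front of the readyList on the next sort.
 */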
/* update the node's priority and try to update the priority of all its ancestors. */
void RegPressureSchedule::UpdatePriority(DepNode &node)
{
    std::vector<DepNode *> workQueue;
    workQueue.emplace_back(&node);
    node.SetPriority(maxPriority++);
    do {
        DepNode *nowNode = workQueue.front();
        (void)workQueue.erase(workQueue.begin());
        for (auto pred : nowNode->GetPreds()) {
            DepNode &from = pred->GetFrom();
            if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) {
                from.SetPriority(maxPriority);
                workQueue.emplace_back(&from);
            }
        }
    } while (!workQueue.empty());
}

/* return true if all of the node's preds have been scheduled. */
bool RegPressureSchedule::CanSchedule(const DepNode &node) const
{
    return node.GetValidPredsSize() == 0;
}

/*
 * Delete the node from the readyList and
 * add a successor of the node to the readyList when
 *  1. the successor has not been scheduled yet;
 *  2. all of the successor's preds have been scheduled, or the dependence between the node
 *     and the successor is a true dependence.
 */
void RegPressureSchedule::UpdateReadyList(const DepNode &node)
{
    /* delete node from readylist */
    for (auto it = readyList.begin(); it != readyList.end(); ++it) {
        if (*it == &node) {
            readyList.erase(it);
            break;
        }
    }
    /* update dependency information of the successors and add nodes into readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
            continue;
        }
        succNode.DecreaseValidPredsSize();
        if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) &&
            (succNode.GetState() == kNormal)) {
            readyList.emplace_back(&succNode);
            succNode.SetState(kReady);
        }
    }
}
/*
 * Another version of UpdateReadyList for the brute-force ready list update.
 * The difference is that it records which successors changed state so they can be restored later.
 */
void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector<bool> &changedToReady)
{
    /* delete node from readylist */
    for (auto it = readyList.begin(); it != readyList.end(); ++it) {
        if (*it == &node) {
            readyList.erase(it);
            break;
        }
    }
    /* update dependency information of the successors and add nodes into readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
            continue;
        }
        succNode.DecreaseValidPredsSize();
        if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) &&
            (succNode.GetState() == kNormal)) {
            readyList.emplace_back(&succNode);
            succNode.SetState(kReady);
            changedToReady.emplace_back(true);
        } else {
            changedToReady.emplace_back(false);
        }
    }
}

/*
 * Restore the ready list status when finishing one brute scheduling series generation
 */
void RegPressureSchedule::RestoreReadyList(DepNode &node, std::vector<bool> &changedToReady)
{
    uint32 i = 0;
    /* restore state information of the successors and delete them from readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        succNode.IncreaseValidPredsSize();
        if (changedToReady.at(i)) {
            succNode.SetState(kNormal);
            for (auto it = readyList.begin(); it != readyList.end(); ++it) {
                if (*it == &succNode) {
                    readyList.erase(it);
                    break;
                }
            }
        }
        ++i;
    }
    /* add the node back into the readyList */
    readyList.emplace_back(&node);
}
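
/*
 * Selection preference: the first ready node that is schedulable, does not increase pressure
 * and does not touch a native-call register wins; otherwise the first such non-increasing node
 * seen is remembered; if every candidate increases pressure, fall back to the head of the
 * (already sorted) readyList.
 */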
/* choose a node to schedule */
DepNode *RegPressureSchedule::ChooseNode()
{
    DepNode *node = nullptr;
    for (auto *it : readyList) {
        if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) {
            if (CanSchedule(*it)) {
                return it;
            } else if (node == nullptr) {
                node = it;
            }
        }
    }
    if (node == nullptr) {
        node = readyList.front();
    }
    return node;
}

void RegPressureSchedule::DumpBBLiveInfo() const
{
    LogInfo::MapleLogger() << "Live In: ";
    for (auto reg : bb->GetLiveInRegNO()) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "Live Out: ";
    for (auto reg : bb->GetLiveOutRegNO()) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpReadyList() const
{
    LogInfo::MapleLogger() << "readyList: "
                           << "\n";
    for (DepNode *it : readyList) {
        if (CanSchedule(*it)) {
            LogInfo::MapleLogger() << it->GetInsn()->GetId() << "CS ";
        } else {
            LogInfo::MapleLogger() << it->GetInsn()->GetId() << "NO ";
        }
    }
    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpSelectInfo(const DepNode &node) const
{
    LogInfo::MapleLogger() << "select a node: "
                           << "\n";
    node.DumpSchedInfo();
    node.DumpRegPressure();
    node.GetInsn()->Dump();

    LogInfo::MapleLogger() << "liveReg: ";
    for (auto reg : liveReg) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpDependencyInfo(const MapleVector<DepNode *> &nodes)
{
    LogInfo::MapleLogger() << "Dump Dependency Begin \n";
    for (auto node : nodes) {
        LogInfo::MapleLogger() << "Insn \n";
        node->GetInsn()->Dump();
        LogInfo::MapleLogger() << "Successors \n";
        /* dump each successor of the node */
        for (auto *succ : node->GetSuccs()) {
            DepNode &succNode = succ->GetTo();
            succNode.GetInsn()->Dump();
        }
    }
    LogInfo::MapleLogger() << "Dump Dependency End \n";
}

void RegPressureSchedule::ReportScheduleError() const
{
    LogInfo::MapleLogger() << "Error No Equal Length for Series"
                           << "\n";
    DumpDependencyInfo(originalNodeSeries);
    for (auto node : scheduledNode) {
        node->GetInsn()->Dump();
    }
    LogInfo::MapleLogger() << "Original One"
                           << "\n";
    for (auto node : originalNodeSeries) {
        node->GetInsn()->Dump();
    }
    LogInfo::MapleLogger() << "Error No Equal Length for End"
                           << "\n";
}

void RegPressureSchedule::ReportScheduleOutput() const
{
    LogInfo::MapleLogger() << "Original Pressure : " << originalPressure << " \n";
    LogInfo::MapleLogger() << "Scheduled Pressure : " << scheduledPressure << " \n";
    if (originalPressure > scheduledPressure) {
        LogInfo::MapleLogger() << "Pressure Reduced by : " << (originalPressure - scheduledPressure) << " \n";
        return;
    } else if (originalPressure == scheduledPressure) {
        LogInfo::MapleLogger() << "Pressure Not Changed \n";
    } else {
        LogInfo::MapleLogger() << "Pressure Increased by : " << (scheduledPressure - originalPressure) << " \n";
    }
    LogInfo::MapleLogger() << "Pressure Not Reduced, Restore Node Series \n";
}

void RegPressureSchedule::DumpBBPressureInfo() const
{
    LogInfo::MapleLogger() << "curPressure: ";
    FOR_ALL_REGCLASS(i)
    {
        LogInfo::MapleLogger() << curPressure[i] << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "maxPressure: ";
    FOR_ALL_REGCLASS(i)
    {
        LogInfo::MapleLogger() << maxPressure[i] << " ";
    }
    LogInfo::MapleLogger() << "\n";
}

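/*
 * Entry point for pre-scheduling one BB. Note that OPTIMISTIC_SCHEDULING is compiled in as
 * false, so the brute-force branch below is normally disabled: without split points the
 * heuristic scheduler is used, and with split points each region is scheduled separately.
 */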
void RegPressureSchedule::DoScheduling(MapleVector<DepNode *> &nodes)
{
    /* Store the original series */
    originalNodeSeries.clear();
    for (auto node : nodes) {
        originalNodeSeries.emplace_back(node);
    }
    InitPartialSplitters(nodes);
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "\n Calculate Pressure Info for Schedule Input Series \n";
#endif
    originalPressure = CalculateRegisterPressure(nodes);
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "Original pressure : " << originalPressure << "\n";
#endif
    /* Original pressure is small enough, skip pre-scheduling */
    if (originalPressure < PRESSURE_STANDARD) {
#ifdef PRESCHED_DEBUG
        LogInfo::MapleLogger() << "Original pressure is small enough, skip pre-scheduling \n";
#endif
        return;
    }
    if (splitterIndexes.empty()) {
        LogInfo::MapleLogger() << "No splitter, normal scheduling \n";
        if (!OPTIMISTIC_SCHEDULING) {
            HeuristicScheduling(nodes);
        } else {
            InitBruteForceScheduling(nodes);
            BruteForceScheduling();
            if (optimisticScheduledNodes.size() == nodes.size() && minPressure < originalPressure) {
                nodes.clear();
                for (auto node : optimisticScheduledNodes) {
                    nodes.emplace_back(node);
                }
            }
        }
    } else {
        /* Split the node list into multiple parts based on split points and conduct scheduling */
        PartialScheduling(nodes);
    }
    scheduledPressure = CalculateRegisterPressure(nodes);
    EmitSchedulingSeries(nodes);
}

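/*
 * Greedy list-scheduling loop: while the readyList is not empty, refresh each candidate's
 * incPressure flag, sort by DepNodePriorityCmp, pick a node via ChooseNode (raising priorities
 * until a schedulable node reaches the front), then commit it and update pressure, near values
 * and the readyList.
 */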
void RegPressureSchedule::HeuristicScheduling(MapleVector<DepNode *> &nodes)
{
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "--------------- bb " << bb->GetId() << " begin scheduling -------------"
                           << "\n";
    DumpBBLiveInfo();
#endif

    /* initialize register pressure information and readylist. */
    Init(nodes);
    CalculateMaxDepth(nodes);
    while (!readyList.empty()) {
        /* calculate register pressure */
        for (DepNode *it : readyList) {
            ReCalculateDepNodePressure(*it);
        }
        if (readyList.size() > 1) {
            SortReadyList();
        }

        /* choose a node that can be scheduled currently. */
        DepNode *node = ChooseNode();
#ifdef PRESCHED_DEBUG
        DumpBBPressureInfo();
        DumpReadyList();
        LogInfo::MapleLogger() << "first tmp select node: " << node->GetInsn()->GetId() << "\n";
#endif

        while (!CanSchedule(*node)) {
            UpdatePriority(*node);
            SortReadyList();
            node = readyList.front();
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "update ready list: "
                                   << "\n";
            DumpReadyList();
#endif
        }

        scheduledNode.emplace_back(node);
        /* mark the node as scheduled */
        node->SetState(kScheduled);
        UpdateBBPressure(*node);
        CalculateNear(*node);
        UpdateReadyList(*node);
#ifdef PRESCHED_DEBUG
        DumpSelectInfo(*node);
#endif
    }

#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "---------------------------------- end --------------------------------"
                           << "\n";
#endif
    /* update nodes according to scheduledNode. */
    nodes.clear();
    for (auto node : scheduledNode) {
        nodes.emplace_back(node);
    }
}
/*
 * Calculate the register pressure for the current BB based on an instruction series
 */
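/*
 * Pressure here is the maximum size of the live virtual-register set while the series is
 * walked in order: defs enter the set, last uses that are not live-out leave it. A sketch of
 * the idea (register names are illustrative only):
 *   r1 = ...; r2 = ...; use r1; use r2   -> live set peaks at {r1, r2}, pressure 2
 *   r1 = ...; use r1; r2 = ...; use r2   -> only one register live at a time, pressure 1
 */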
int RegPressureSchedule::CalculateRegisterPressure(MapleVector<DepNode *> &nodes)
{
    /* Initialize the live, live-in and live-out register information */
    liveReg.clear();
    liveInRegNO = bb->GetLiveInRegNO();
    liveOutRegNO = bb->GetLiveOutRegNO();
    std::vector<ScheduleState> restoreStateSeries;
    int maximumPressure = 0;
    /* Mock all the nodes to kScheduled status for pressure calculation */
    for (auto node : nodes) {
        restoreStateSeries.emplace_back(node->GetState());
        node->SetState(kScheduled);
    }
    /* Update the live register set according to the instruction series */
    for (auto node : nodes) {
        for (auto &reg : node->GetUseRegnos()) {
            UpdateLiveReg(*node, reg, false);
        }
        for (auto &defReg : node->GetDefRegnos()) {
            UpdateLiveReg(*node, defReg, true);
        }
        int currentPressure = static_cast<int>(liveReg.size());
        if (currentPressure > maximumPressure) {
            maximumPressure = currentPressure;
        }
#ifdef PRESCHED_DEBUG
        node->GetInsn()->Dump();
        LogInfo::MapleLogger() << "Dump Live Reg : "
                               << "\n";
        for (auto reg : liveReg) {
            LogInfo::MapleLogger() << "R" << reg << " ";
        }
        LogInfo::MapleLogger() << "\n";
#endif
    }
    /* Restore the Schedule State */
    uint32 i = 0;
    for (auto node : nodes) {
        node->SetState(restoreStateSeries.at(i));
        ++i;
    }
    return maximumPressure;
}

/*
 * Split the series into multiple parts and conduct pre-scheduling in every part
 */
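/*
 * Each region between two consecutive split points is copied into partialList, scheduled on
 * its own with HeuristicScheduling (partialSet restricts the dependence bookkeeping to the
 * region), and the scheduled regions are then concatenated back into the original node vector.
 */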
void RegPressureSchedule::PartialScheduling(MapleVector<DepNode *> &nodes)
{
    CHECK_FATAL(splitterIndexes.size() > 0, "must not be zero");
    for (size_t i = 0; i < splitterIndexes.size() - 1; ++i) {
        constexpr uint32 lastTwoNodeIndex = 2;
        auto begin = static_cast<uint32>(splitterIndexes.at(i));
        auto end = static_cast<uint32>(splitterIndexes.at(i + 1));
        for (uint32 j = begin; j < end; ++j) {
            partialList.emplace_back(nodes.at(j));
        }
        if (i == splitterIndexes.size() - lastTwoNodeIndex) {
            partialList.emplace_back(nodes.at(end));
        }
        for (auto node : partialList) {
            partialSet.insert(node);
        }
        HeuristicScheduling(partialList);
        for (auto node : partialList) {
            partialScheduledNode.emplace_back(node);
        }
        partialList.clear();
        partialSet.clear();
    }
    nodes.clear();
    /* Construct the overall scheduling output */
    for (auto node : partialScheduledNode) {
        nodes.emplace_back(node);
    }
}

/*
 * Brute-force scheduling algorithm.
 * It enumerates all possible schedule series and picks the best one.
 */
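/*
 * Depth-first enumeration with backtracking: every schedulable node in the current readyList
 * is tried in turn, the readyList and state changes are undone afterwards, and each complete
 * series is costed with CalculateRegisterPressure. Since the search space grows factorially
 * with the block size, BRUTE_MAXIMUM_LIMIT caps the number of complete series examined at
 * SCHEDULING_MAXIMUM_COUNT.
 */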
void RegPressureSchedule::BruteForceScheduling()
{
    /* stop brute force scheduling when exceeding the count limit */
    if (BRUTE_MAXIMUM_LIMIT && (scheduleSeriesCount > SCHEDULING_MAXIMUM_COUNT)) {
        return;
    }
    int defaultPressureValue = -1;
    /* ReadyList is empty, scheduling is over */
    if (readyList.empty()) {
        if (originalNodeSeries.size() != scheduledNode.size()) {
#ifdef PRESCHED_DEBUG
            ReportScheduleError();
#endif
            return;
        }
        ++scheduleSeriesCount;
        int currentPressure = CalculateRegisterPressure(scheduledNode);
        if (minPressure == defaultPressureValue || currentPressure < minPressure) {
            minPressure = currentPressure;
            /* update better scheduled series */
            optimisticScheduledNodes.clear();
            for (auto node : scheduledNode) {
                optimisticScheduledNodes.emplace_back(node);
            }
            return;
        }
        return;
    }
    /* store the current status of the ready list */
    std::vector<DepNode *> innerList;
    for (auto tempNode : readyList) {
        innerList.emplace_back(tempNode);
    }
    for (auto *node : innerList) {
        if (CanSchedule(*node)) {
            /* update readyList and node dependency info */
            std::vector<bool> changedToReady;
            BruteUpdateReadyList(*node, changedToReady);
            scheduledNode.emplace_back(node);
            node->SetState(kScheduled);
            BruteForceScheduling();
            node->SetState(kReady);
            /* restore readyList and node dependency info */
            RestoreReadyList(*node, changedToReady);
            scheduledNode.pop_back();
        }
    }
}

/*
 * Calculate the pred size based on the dependency information
 */
void RegPressureSchedule::CalculatePredSize(DepNode &node)
{
    constexpr uint32 emptyPredsSize = 0;
    node.SetValidPredsSize(emptyPredsSize);
    for (auto pred : node.GetPreds()) {
        DepNode &from = pred->GetFrom();
        if (!partialSet.empty() && (partialSet.find(&from) == partialSet.end())) {
            continue;
        } else {
            node.IncreaseValidPredsSize();
        }
    }
}

void RegPressureSchedule::InitBruteForceScheduling(MapleVector<DepNode *> &nodes)
{
    /* Calculate pred size of the node */
    for (auto node : nodes) {
        CalculatePredSize(*node);
    }
    readyList.clear();
    optimisticScheduledNodes.clear();
    scheduledNode.clear();
    DepNode *firstNode = nodes.front();
    firstNode->SetState(kReady);
    readyList.emplace_back(firstNode);
}

/*
 * Emit the pre-scheduling output based on the new register pressure
 */
void RegPressureSchedule::EmitSchedulingSeries(MapleVector<DepNode *> &nodes)
{
#ifdef PRESCHED_DEBUG
    ReportScheduleOutput();
#endif
    if (originalPressure <= scheduledPressure) {
        /* Restore the original series */
        nodes.clear();
        for (auto node : originalNodeSeries) {
            nodes.emplace_back(node);
        }
    }
}

/*
 * ------------- Schedule function ----------
 * calculate and mark each insn id, each BB's firstLoc and lastLoc.
 */
void Schedule::InitIDAndLoc()
{
    uint32 id = 0;
    FOR_ALL_BB(bb, &cgFunc)
    {
        bb->SetLastLoc(bb->GetPrev() ? bb->GetPrev()->GetLastLoc() : nullptr);
        FOR_BB_INSNS(insn, bb)
        {
            insn->SetId(id++);
#if DEBUG
            insn->AppendComment(" Insn id: " + std::to_string(insn->GetId()));
#endif
            if (insn->IsImmaterialInsn() && !insn->IsComment()) {
                bb->SetLastLoc(insn);
            } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) {
                bb->SetFirstLoc(*bb->GetLastLoc());
            }
        }
    }
}

/* === new pm === */
bool CgPreScheduling::PhaseRun(maplebe::CGFunc &f)
{
    if (f.HasAsm()) {
        return true;
    }
    if (LIST_SCHED_DUMP_NEWPM) {
        LogInfo::MapleLogger() << "Before CgDoPreScheduling : " << f.GetName() << "\n";
        DotGenerator::GenerateDot("preschedule", f, f.GetMirModule());
    }
    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
    /* reset the live-analysis result container. */
    DEBUG_ASSERT(live != nullptr, "nullptr check");
    live->ResetLiveSet();

    Schedule *schedule = nullptr;
#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64)
    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
#if defined(TARGARM32) && TARGARM32
    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
    schedule->ListScheduling(true);
    live->ClearInOutDataInfo();

    return true;
}

void CgPreScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
{
    aDep.AddRequired<CgLiveAnalysis>();
    aDep.PreservedAllExcept<CgLiveAnalysis>();
}
MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreScheduling, prescheduling)

bool CgScheduling::PhaseRun(maplebe::CGFunc &f)
{
    if (f.HasAsm()) {
        return true;
    }
    if (LIST_SCHED_DUMP_NEWPM) {
        LogInfo::MapleLogger() << "Before CgDoScheduling : " << f.GetName() << "\n";
        DotGenerator::GenerateDot("scheduling", f, f.GetMirModule());
    }
    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
    /* reset the live-analysis result container. */
    DEBUG_ASSERT(live != nullptr, "nullptr check");
    live->ResetLiveSet();

    Schedule *schedule = nullptr;
#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64)
    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
#if defined(TARGARM32) && TARGARM32
    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
    schedule->ListScheduling(false);
    live->ClearInOutDataInfo();

    return true;
}

void CgScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
{
    aDep.AddRequired<CgLiveAnalysis>();
    aDep.PreservedAllExcept<CgLiveAnalysis>();
}
MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgScheduling, scheduling)
} /* namespace maplebe */