/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if TARGAARCH64
#include "aarch64_schedule.h"
#elif TARGRISCV64
#include "riscv64_schedule.h"
#endif
#if TARGARM32
#include "arm32_schedule.h"
#endif
#include "cg.h"
#include "optimize_common.h"

#undef PRESCHED_DEBUG

namespace maplebe {
/* pressure standard value; pressure under this value will not lead to spill operations */
static constexpr int g_pressureStandard = 27;
/* optimistic scheduling option */
static constexpr bool g_optimisticScheduling = false;
/* brute-force maximum count limit option */
static constexpr bool g_bruteMaximumLimit = true;
/* brute-force maximum count */
static constexpr int g_schedulingMaximumCount = 20000;
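/*
 * These options steer DoScheduling() below: a block whose measured pressure is
 * already below g_pressureStandard is skipped by pre-scheduling;
 * g_optimisticScheduling switches the splitter-free path from heuristic list
 * scheduling to the brute-force search, and when g_bruteMaximumLimit is set
 * that search stops once g_schedulingMaximumCount complete series have been
 * evaluated.
 */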

/* ---- RegPressureSchedule function ---- */
void RegPressureSchedule::InitBBInfo(BB &b, MemPool &memPool, const MapleVector<DepNode *> &nodes)
{
    bb = &b;
    liveReg.clear();
    scheduledNode.clear();
    readyList.clear();
    maxPriority = 0;
    maxPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    curPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    physicalRegNum = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
    for (auto node : nodes) {
        node->SetState(kNormal);
    }
}

/* return register type according to register number */
RegType RegPressureSchedule::GetRegisterType(regno_t reg) const
{
    return cgFunc.GetRegisterType(reg);
}

/* Get the number of physical registers in each register class */
void RegPressureSchedule::BuildPhyRegInfo(const std::vector<int32> &regNumVec)
{
    FOR_ALL_REGCLASS(i)
    {
        physicalRegNum[i] = regNumVec[i];
    }
}

/* Initialize pre-scheduling split points in the BB */
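/*
 * For a return BB with more than two nodes, split points are placed at index 0,
 * nodes.size() - 2 and nodes.size() - 1, so the final two instructions form
 * their own region and stay at the end of the block during partial scheduling.
 */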
void RegPressureSchedule::initPartialSplitters(const MapleVector<DepNode *> &nodes)
{
    bool addFirstAndLastNodeIndex = false;
    constexpr uint32 SecondLastNodeIndexFromBack = 2;
    constexpr uint32 LastNodeIndexFromBack = 1;
    constexpr uint32 FirstNodeIndex = 0;
    constexpr uint32 minimumBBSize = 2;
    /* Add a split point for the last instruction in a return BB */
    if (bb->GetKind() == BB::kBBReturn && nodes.size() > minimumBBSize) {
        splitterIndexes.emplace_back(nodes.size() - SecondLastNodeIndexFromBack);
        addFirstAndLastNodeIndex = true;
    }
    /* Add the first and last nodes as split points if needed */
    if (addFirstAndLastNodeIndex) {
        splitterIndexes.emplace_back(nodes.size() - LastNodeIndexFromBack);
        splitterIndexes.emplace_back(FirstNodeIndex);
    }
    std::sort(splitterIndexes.begin(), splitterIndexes.end(), std::less<int> {});
}

/* initialize register pressure information according to the bb's live-in data.
 * initialize each node's valid preds size.
 */
void RegPressureSchedule::Init(const MapleVector<DepNode *> &nodes)
{
    readyList.clear();
    scheduledNode.clear();
    liveReg.clear();
    liveInRegNO.clear();
    liveOutRegNO.clear();
    liveInRegNO = bb->GetLiveInRegNO();
    liveOutRegNO = bb->GetLiveOutRegNO();

    FOR_ALL_REGCLASS(i)
    {
        curPressure[i] = 0;
        maxPressure[i] = 0;
    }

    for (auto *node : nodes) {
        /* calculate the register pressure of the node's uses */
        for (auto &useReg : node->GetUseRegnos()) {
            CalculatePressure(*node, useReg, false);
        }

        /* calculate the register pressure of the node's defs */
        size_t i = 0;
        for (auto &defReg : node->GetDefRegnos()) {
            CalculatePressure(*node, defReg, true);
            RegType regType = GetRegisterType(defReg);
            /* if there is no use list, the register is only defined, never used */
            if (node->GetRegDefs(i) == nullptr && liveOutRegNO.find(defReg) == liveOutRegNO.end()) {
                node->IncDeadDefByIndex(regType);
            }
            ++i;
        }
        /* Calculate the pred size of the node */
        CalculatePredSize(*node);
    }

    DepNode *firstNode = nodes.front();
    readyList.emplace_back(firstNode);
    firstNode->SetState(kReady);
    scheduledNode.reserve(nodes.size());
    constexpr size_t readyListSize = 10;
    readyList.reserve(readyListSize);
}

void RegPressureSchedule::SortReadyList()
{
    std::sort(readyList.begin(), readyList.end(), DepNodePriorityCmp);
}

/* return true if node1 should be scheduled first. */
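/*
 * Tie-break order: higher priority; more calls below the node (only when both
 * nodes increase pressure); larger maxDepth + near; larger near; more calls;
 * fewer successors; a node with hasPreg set wins over one without; and finally
 * the smaller (earlier) insn id.
 */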
bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2)
{
    CHECK_NULL_FATAL(node1);
    CHECK_NULL_FATAL(node2);
    int32 priority1 = node1->GetPriority();
    int32 priority2 = node2->GetPriority();
    if (priority1 != priority2) {
        return priority1 > priority2;
    }

    int32 numCall1 = node1->GetNumCall();
    int32 numCall2 = node2->GetNumCall();
    if (node1->GetIncPressure() == true && node2->GetIncPressure() == true) {
        if (numCall1 != numCall2) {
            return numCall1 > numCall2;
        }
    }

    int32 near1 = node1->GetNear();
    int32 near2 = node2->GetNear();
    int32 depthS1 = node1->GetMaxDepth() + near1;
    int32 depthS2 = node2->GetMaxDepth() + near2;
    if (depthS1 != depthS2) {
        return depthS1 > depthS2;
    }

    if (near1 != near2) {
        return near1 > near2;
    }

    if (numCall1 != numCall2) {
        return numCall1 > numCall2;
    }

    size_t succsSize1 = node1->GetSuccs().size();
    size_t succsSize2 = node2->GetSuccs().size();
    if (succsSize1 != succsSize2) {
        return succsSize1 < succsSize2;
    }

    if (node1->GetHasPreg() != node2->GetHasPreg()) {
        return node1->GetHasPreg();
    }

    return node1->GetInsn()->GetId() < node2->GetInsn()->GetId();
}

/* set a node's incPressure to true when the pressure of a register class increases */
void RegPressureSchedule::ReCalculateDepNodePressure(DepNode &node)
{
    /* if there is a type of register pressure that increases, set incPressure to true. */
    auto &pressures = node.GetPressure();
    node.SetIncPressure(pressures[kRegisterInt] > 0);
}

/* calculate the maxDepth of every node in nodes. */
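/*
 * maxDepth is the length of the longest path from a node to a leaf of the
 * dependence DAG (e.g. in the chain n1 -> n2 -> n3 the depths are 2, 1 and 0).
 * numCall is propagated from successors to predecessors and grows with the
 * number of call instructions below the node; both values are used as
 * tie-breakers in DepNodePriorityCmp.
 */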
void RegPressureSchedule::CalculateMaxDepth(const MapleVector<DepNode *> &nodes)
{
    /* walk from the last node to the first node. */
    for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) {
        /* init call count */
        if ((*it)->GetInsn()->IsCall()) {
            (*it)->SetNumCall(1);
        }
        /* traverse each successor of the node. */
        for (auto succ : (*it)->GetSuccs()) {
            DepNode &to = succ->GetTo();
            if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) {
                (*it)->SetMaxDepth(to.GetMaxDepth() + 1);
            }

            if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) {
                (*it)->SetNumCall(to.GetNumCall() + 1);
            } else if ((*it)->GetNumCall() < to.GetNumCall()) {
                (*it)->SetNumCall(to.GetNumCall());
            }
        }
    }
}

/* calculate the near value of every successor of the node. */
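/*
 * near is only propagated along true-dependence edges: a successor that consumes
 * a value produced by the node receives at least near(node) + 1. A larger near is
 * preferred by DepNodePriorityCmp, which tends to place consumers soon after
 * their producers.
 */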
void RegPressureSchedule::CalculateNear(const DepNode &node)
{
    for (auto succ : node.GetSuccs()) {
        DepNode &to = succ->GetTo();
        if (succ->GetDepType() == kDependenceTypeTrue && to.GetNear() < node.GetNear() + 1) {
            to.SetNear(node.GetNear() + 1);
        }
    }
}

/* return true if this is the last use of regNO. */
bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO)
{
    size_t i = 0;
    for (auto reg : node.GetUseRegnos()) {
        if (reg == regNO) {
            break;
        }
        ++i;
    }
    RegList *regList = node.GetRegUses(i);

    /*
     * Apart from this node, if there is an unscheduled insn in regNO's regList,
     * then this is not the last use of regNO; return false.
     */
    while (regList != nullptr) {
        CHECK_NULL_FATAL(regList->insn);
        DepNode *useNode = regList->insn->GetDepNode();
        DEBUG_ASSERT(useNode != nullptr, "get depend node failed in RegPressureSchedule::IsLastUse");
        if ((regList->insn != node.GetInsn()) && (useNode->GetState() != kScheduled)) {
            return false;
        }
        regList = regList->next;
    }
    return true;
}

void RegPressureSchedule::CalculatePressure(DepNode &node, regno_t reg, bool def)
{
    RegType regType = GetRegisterType(reg);
    /* if the node defines a register, register pressure increases. */
    if (def) {
        node.IncPressureByIndex(regType);
    } else {
        /* if this is the last use of the reg, register pressure decreases. */
        if (IsLastUse(node, reg)) {
            node.DecPressureByIndex(regType);
        }
    }
}

/* update live reg information. */
void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool def)
{
    if (def) {
        if (liveReg.find(reg) == liveReg.end()) {
            (void)liveReg.insert(reg);
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "Add new def R" << reg << " to live reg list \n";
#endif
        }
        /* if there is no use list, the register is only defined, never used */
        size_t i = 1;
        for (auto defReg : node.GetDefRegnos()) {
            if (defReg == reg) {
                break;
            }
            ++i;
        }
        if (node.GetRegDefs(i) == nullptr && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "Remove dead def " << reg << " from live reg list \n";
#endif
            liveReg.erase(reg);
        } else if (node.GetRegDefs(i) != nullptr) {
#ifdef PRESCHED_DEBUG
            auto regList = node.GetRegDefs(i);
            LogInfo::MapleLogger() << i << " Live def, dump use insn here \n";
            while (regList != nullptr) {
                regList->insn->Dump();
                regList = regList->next;
            }
#endif
        }
    } else {
        if (IsLastUse(node, reg)) {
            if (liveReg.find(reg) != liveReg.end() && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
#ifdef PRESCHED_DEBUG
                LogInfo::MapleLogger() << "Remove last use R" << reg << " from live reg list\n";
#endif
                liveReg.erase(reg);
            }
        }
    }
}

/* update register pressure information. */
void RegPressureSchedule::UpdateBBPressure(const DepNode &node)
{
    size_t idx = 0;
    for (auto &reg : node.GetUseRegnos()) {
#ifdef PRESCHED_DEBUG
        LogInfo::MapleLogger() << "Use Reg : R" << reg << "\n";
        UpdateLiveReg(node, reg, false);
        if (liveReg.find(reg) == liveReg.end()) {
            ++idx;
            continue;
        }
#endif

        /* find the insns that use the reg; if an insn is the last user of the reg, decrease its pressure by 1 */
        RegList *regList = node.GetRegUses(idx);

        while (regList != nullptr) {
            CHECK_NULL_FATAL(regList->insn);
            DepNode *useNode = regList->insn->GetDepNode();
            if (useNode->GetState() == kScheduled) {
                regList = regList->next;
                continue;
            }

            if (IsLastUse(*useNode, reg)) {
                RegType regType = GetRegisterType(reg);
                useNode->DecPressureByIndex(regType);
            }
            break;
        }
        ++idx;
    }

#ifdef PRESCHED_DEBUG
    for (auto &defReg : node.GetDefRegnos()) {
        UpdateLiveReg(node, defReg, true);
    }
#endif

    const auto &pressures = node.GetPressure();
    const auto &deadDefNum = node.GetDeadDefNum();
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "\nnode's pressure: ";
    for (auto pressure : pressures) {
        LogInfo::MapleLogger() << pressure << " ";
    }
    LogInfo::MapleLogger() << "\n";
#endif

    FOR_ALL_REGCLASS(i)
    {
        curPressure[i] += pressures[i];
        curPressure[i] -= deadDefNum[i];
        if (curPressure[i] > maxPressure[i]) {
            maxPressure[i] = curPressure[i];
        }
    }
}

/* update the node's priority and try to update the priority of all of its ancestors. */
void RegPressureSchedule::UpdatePriority(DepNode &node)
{
    std::vector<DepNode *> workQueue;
    workQueue.emplace_back(&node);
    node.SetPriority(maxPriority++);
    do {
        DepNode *nowNode = workQueue.front();
        (void)workQueue.erase(workQueue.begin());
        for (auto pred : nowNode->GetPreds()) {
            DepNode &from = pred->GetFrom();
            if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) {
                from.SetPriority(maxPriority);
                workQueue.emplace_back(&from);
            }
        }
    } while (!workQueue.empty());
}

/* return true if all of the node's preds have been scheduled. */
bool RegPressureSchedule::CanSchedule(const DepNode &node) const
{
    return node.GetValidPredsSize() == 0;
}

/*
 * delete node from the readyList and add a successor of node to the readyList when
 *  1. the successor has not been scheduled yet, and
 *  2. all of the successor's preds have been scheduled, or the dependence between
 *     node and the successor is a true dependence.
 */
void RegPressureSchedule::UpdateReadyList(const DepNode &node)
{
    /* delete node from the readyList */
    for (auto it = readyList.begin(); it != readyList.end(); ++it) {
        if (*it == &node) {
            readyList.erase(it);
            break;
        }
    }
    /* update dependency information of the successors and add nodes into the readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
            continue;
        }
        succNode.DescreaseValidPredsSize();
        if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) &&
            (succNode.GetState() == kNormal)) {
            readyList.emplace_back(&succNode);
            succNode.SetState(kReady);
        }
    }
}
/*
 * A variant of UpdateReadyList for the brute-force search: it additionally records
 * whether each successor was moved to the ready state so that the change can be
 * undone later by RestoreReadyList.
 */
void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector<bool> &changedToReady)
{
    /* delete node from the readyList */
    for (auto it = readyList.begin(); it != readyList.end(); ++it) {
        if (*it == &node) {
            readyList.erase(it);
            break;
        }
    }
    /* update dependency information of the successors and add nodes into the readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
            continue;
        }
        succNode.DescreaseValidPredsSize();
        if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) &&
            (succNode.GetState() == kNormal)) {
            readyList.emplace_back(&succNode);
            succNode.SetState(kReady);
            changedToReady.emplace_back(true);
        } else {
            changedToReady.emplace_back(false);
        }
    }
}

/*
 * Restore the readyList status after finishing one brute-force scheduling series
 */
void RegPressureSchedule::RestoreReadyList(DepNode &node, std::vector<bool> &changedToReady)
{
    uint32 i = 0;
    /* restore the state of the successors and delete them from the readyList */
    for (auto *succ : node.GetSuccs()) {
        DepNode &succNode = succ->GetTo();
        succNode.IncreaseValidPredsSize();
        if (changedToReady.at(i)) {
            succNode.SetState(kNormal);
            for (auto it = readyList.begin(); it != readyList.end(); ++it) {
                if (*it == &succNode) {
                    readyList.erase(it);
                    break;
                }
            }
        }
        ++i;
    }
    /* add the node back into the readyList */
    readyList.emplace_back(&node);
}
/* choose a node to schedule */
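/*
 * Preference order: the first ready node that neither increases pressure nor
 * holds a native-call register and can be scheduled right now; otherwise the
 * first such node even if it is not yet schedulable; otherwise the head of the
 * already sorted readyList.
 */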
DepNode *RegPressureSchedule::ChooseNode()
{
    DepNode *node = nullptr;
    for (auto *it : readyList) {
        if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) {
            if (CanSchedule(*it)) {
                return it;
            } else if (node == nullptr) {
                node = it;
            }
        }
    }
    if (node == nullptr) {
        node = readyList.front();
    }
    return node;
}

void RegPressureSchedule::DumpBBLiveInfo() const
{
    LogInfo::MapleLogger() << "Live In: ";
    for (auto reg : bb->GetLiveInRegNO()) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "Live Out: ";
    for (auto reg : bb->GetLiveOutRegNO()) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpReadyList() const
{
    LogInfo::MapleLogger() << "readyList: "
                           << "\n";
    for (DepNode *it : readyList) {
        if (CanSchedule(*it)) {
            LogInfo::MapleLogger() << it->GetInsn()->GetId() << "CS ";
        } else {
            LogInfo::MapleLogger() << it->GetInsn()->GetId() << "NO ";
        }
    }
    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpSelectInfo(const DepNode &node) const
{
    LogInfo::MapleLogger() << "select a node: "
                           << "\n";
    node.DumpSchedInfo();
    node.DumpRegPressure();
    node.GetInsn()->Dump();

    LogInfo::MapleLogger() << "liveReg: ";
    for (auto reg : liveReg) {
        LogInfo::MapleLogger() << "R" << reg << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DumpDependencyInfo(const MapleVector<DepNode *> &nodes)
{
    LogInfo::MapleLogger() << "Dump Dependency Begin \n";
    for (auto node : nodes) {
        LogInfo::MapleLogger() << "Insn \n";
        node->GetInsn()->Dump();
        LogInfo::MapleLogger() << "Successors \n";
        /* dump each successor of the node */
        for (auto *succ : node->GetSuccs()) {
            DepNode &succNode = succ->GetTo();
            succNode.GetInsn()->Dump();
        }
    }
    LogInfo::MapleLogger() << "Dump Dependency End \n";
}

void RegPressureSchedule::ReportScheduleError() const
{
    LogInfo::MapleLogger() << "Error No Equal Length for Series"
                           << "\n";
    DumpDependencyInfo(originalNodeSeries);
    for (auto node : scheduledNode) {
        node->GetInsn()->Dump();
    }
    LogInfo::MapleLogger() << "Original One"
                           << "\n";
    for (auto node : originalNodeSeries) {
        node->GetInsn()->Dump();
    }
    LogInfo::MapleLogger() << "Error No Equal Length for End"
                           << "\n";
}

void RegPressureSchedule::ReportScheduleOutput() const
{
    LogInfo::MapleLogger() << "Original Pressure : " << originalPressure << " \n";
    LogInfo::MapleLogger() << "Scheduled Pressure : " << scheduledPressure << " \n";
    if (originalPressure > scheduledPressure) {
        LogInfo::MapleLogger() << "Pressure Reduced by : " << (originalPressure - scheduledPressure) << " \n";
        return;
    } else if (originalPressure == scheduledPressure) {
        LogInfo::MapleLogger() << "Pressure Not Changed \n";
    } else {
        LogInfo::MapleLogger() << "Pressure Increased by : " << (scheduledPressure - originalPressure) << " \n";
    }
    LogInfo::MapleLogger() << "Pressure Not Reduced, Restore Node Series \n";
}

void RegPressureSchedule::DumpBBPressureInfo() const
{
    LogInfo::MapleLogger() << "curPressure: ";
    FOR_ALL_REGCLASS(i)
    {
        LogInfo::MapleLogger() << curPressure[i] << " ";
    }
    LogInfo::MapleLogger() << "\n";

    LogInfo::MapleLogger() << "maxPressure: ";
    FOR_ALL_REGCLASS(i)
    {
        LogInfo::MapleLogger() << maxPressure[i] << " ";
    }
    LogInfo::MapleLogger() << "\n";
}

void RegPressureSchedule::DoScheduling(MapleVector<DepNode *> &nodes)
{
    /* Store the original series */
    originalNodeSeries.clear();
    for (auto node : nodes) {
        originalNodeSeries.emplace_back(node);
    }
    initPartialSplitters(nodes);
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "\n Calculate Pressure Info for Schedule Input Series \n";
#endif
    originalPressure = CalculateRegisterPressure(nodes);
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "Original pressure : " << originalPressure << "\n";
#endif
    /* Original pressure is small enough, skip pre-scheduling */
    if (originalPressure < g_pressureStandard) {
#ifdef PRESCHED_DEBUG
        LogInfo::MapleLogger() << "Original pressure is small enough, skip pre-scheduling \n";
#endif
        return;
    }
    if (splitterIndexes.empty()) {
        LogInfo::MapleLogger() << "No splitter, normal scheduling \n";
        if (!g_optimisticScheduling) {
            HeuristicScheduling(nodes);
        } else {
            InitBruteForceScheduling(nodes);
            BruteForceScheduling();
            if (optimisticScheduledNodes.size() == nodes.size() && minPressure < originalPressure) {
                nodes.clear();
                for (auto node : optimisticScheduledNodes) {
                    nodes.emplace_back(node);
                }
            }
        }
    } else {
        /* Split the node list into multiple parts based on the split points and schedule each part */
        PartialScheduling(nodes);
    }
    scheduledPressure = CalculateRegisterPressure(nodes);
    EmitSchedulingSeries(nodes);
}

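/*
 * Greedy list scheduling: keep the readyList sorted by DepNodePriorityCmp and pick
 * a candidate with ChooseNode(); while the candidate cannot be scheduled yet, raise
 * the priority of its unscheduled ancestors (UpdatePriority), re-sort, and take the
 * head of the list. Each committed node then updates pressure, near values and the
 * readyList, until the readyList is empty.
 */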
void RegPressureSchedule::HeuristicScheduling(MapleVector<DepNode *> &nodes)
{
#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "--------------- bb " << bb->GetId() << " begin scheduling -------------"
                           << "\n";
    DumpBBLiveInfo();
#endif

    /* initialize register pressure information and the readyList. */
    Init(nodes);
    CalculateMaxDepth(nodes);
    while (!readyList.empty()) {
        /* calculate register pressure */
        for (DepNode *it : readyList) {
            ReCalculateDepNodePressure(*it);
        }
        if (readyList.size() > 1) {
            SortReadyList();
        }

        /* choose a node that can be scheduled currently. */
        DepNode *node = ChooseNode();
#ifdef PRESCHED_DEBUG
        DumpBBPressureInfo();
        DumpReadyList();
        LogInfo::MapleLogger() << "first tmp select node: " << node->GetInsn()->GetId() << "\n";
#endif

        while (!CanSchedule(*node)) {
            UpdatePriority(*node);
            SortReadyList();
            node = readyList.front();
#ifdef PRESCHED_DEBUG
            LogInfo::MapleLogger() << "update ready list: "
                                   << "\n";
            DumpReadyList();
#endif
        }

        scheduledNode.emplace_back(node);
        /* mark the node as scheduled */
        node->SetState(kScheduled);
        UpdateBBPressure(*node);
        CalculateNear(*node);
        UpdateReadyList(*node);
#ifdef PRESCHED_DEBUG
        DumpSelectInfo(*node);
#endif
    }

#ifdef PRESCHED_DEBUG
    LogInfo::MapleLogger() << "---------------------------------- end --------------------------------"
                           << "\n";
#endif
    /* update nodes according to scheduledNode. */
    nodes.clear();
    for (auto node : scheduledNode) {
        nodes.emplace_back(node);
    }
}
/*
 * Calculate the register pressure of the current BB based on an instruction series
 */
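/*
 * The pressure of a series is the peak size of the live-register set while the
 * instructions are replayed in order. Every node is temporarily marked kScheduled
 * so that IsLastUse() sees the whole series; the original states are restored
 * before returning.
 */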
int RegPressureSchedule::CalculateRegisterPressure(MapleVector<DepNode *> &nodes)
{
    /* Initialize the live, live-in and live-out register information */
    liveReg.clear();
    liveInRegNO = bb->GetLiveInRegNO();
    liveOutRegNO = bb->GetLiveOutRegNO();
    std::vector<ScheduleState> restoreStateSeries;
    int maximumPressure = 0;
    /* Mock all the nodes to kScheduled status for pressure calculation */
    for (auto node : nodes) {
        restoreStateSeries.emplace_back(node->GetState());
        node->SetState(kScheduled);
    }
    /* Update the live register set according to the instruction series */
    for (auto node : nodes) {
        for (auto &reg : node->GetUseRegnos()) {
            UpdateLiveReg(*node, reg, false);
        }
        for (auto &defReg : node->GetDefRegnos()) {
            UpdateLiveReg(*node, defReg, true);
        }
        int currentPressure = static_cast<int>(liveReg.size());
        if (currentPressure > maximumPressure) {
            maximumPressure = currentPressure;
        }
#ifdef PRESCHED_DEBUG
        node->GetInsn()->Dump();
        LogInfo::MapleLogger() << "Dump Live Reg : "
                               << "\n";
        for (auto reg : liveReg) {
            LogInfo::MapleLogger() << "R" << reg << " ";
        }
        LogInfo::MapleLogger() << "\n";
#endif
    }
    /* Restore the Schedule State */
    uint32 i = 0;
    for (auto node : nodes) {
        node->SetState(restoreStateSeries.at(i));
        ++i;
    }
    return maximumPressure;
}

/*
 * Split the series into multiple parts and conduct pre-scheduling in every part
 */
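/*
 * Adjacent split points delimit the regions: the nodes of each region are collected
 * into partialList/partialSet, scheduled with HeuristicScheduling(), and appended to
 * partialScheduledNode, so no instruction moves across a split point.
 */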
void RegPressureSchedule::PartialScheduling(MapleVector<DepNode *> &nodes)
{
    for (size_t i = 0; i < splitterIndexes.size() - 1; ++i) {
        constexpr uint32 lastTwoNodeIndex = 2;
        auto begin = static_cast<uint32>(splitterIndexes.at(i));
        auto end = static_cast<uint32>(splitterIndexes.at(i + 1));
        for (uint32 j = begin; j < end; ++j) {
            partialList.emplace_back(nodes.at(j));
        }
        if (i == splitterIndexes.size() - lastTwoNodeIndex) {
            partialList.emplace_back(nodes.at(end));
        }
        for (auto node : partialList) {
            partialSet.insert(node);
        }
        HeuristicScheduling(partialList);
        for (auto node : partialList) {
            partialScheduledNode.emplace_back(node);
        }
        partialList.clear();
        partialSet.clear();
    }
    nodes.clear();
    /* Construct the overall scheduling output */
    for (auto node : partialScheduledNode) {
        nodes.emplace_back(node);
    }
}

/*
 * Brute-force scheduling algorithm:
 * it enumerates all the possible schedule series and picks the best one
 */
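/*
 * Implemented as depth-first backtracking over the topological orders of the
 * dependence DAG: every schedulable node on the readyList is tried in turn, the
 * readyList and pred counts are restored on return, and the series with the lowest
 * pressure seen so far is kept in optimisticScheduledNodes. With g_bruteMaximumLimit
 * set, the search stops once g_schedulingMaximumCount complete series have been
 * evaluated.
 */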
void RegPressureSchedule::BruteForceScheduling()
{
    /* stop brute-force scheduling when exceeding the count limit */
    if (g_bruteMaximumLimit && (scheduleSeriesCount > g_schedulingMaximumCount)) {
        return;
    }
    int defaultPressureValue = -1;
    /* the readyList is empty, scheduling is over */
    if (readyList.empty()) {
        if (originalNodeSeries.size() != scheduledNode.size()) {
#ifdef PRESCHED_DEBUG
            ReportScheduleError();
#endif
            return;
        }
        ++scheduleSeriesCount;
        int currentPressure = CalculateRegisterPressure(scheduledNode);
        if (minPressure == defaultPressureValue || currentPressure < minPressure) {
            minPressure = currentPressure;
            /* update the better scheduled series */
            optimisticScheduledNodes.clear();
            for (auto node : scheduledNode) {
                optimisticScheduledNodes.emplace_back(node);
            }
            return;
        }
        return;
    }
    /* store the current status of the ready list */
    std::vector<DepNode *> innerList;
    for (auto tempNode : readyList) {
        innerList.emplace_back(tempNode);
    }
    for (auto *node : innerList) {
        if (CanSchedule(*node)) {
            /* update the readyList and node dependency info */
            std::vector<bool> changedToReady;
            BruteUpdateReadyList(*node, changedToReady);
            scheduledNode.emplace_back(node);
            node->SetState(kScheduled);
            BruteForceScheduling();
            node->SetState(kReady);
            /* restore the readyList and node dependency info */
            RestoreReadyList(*node, changedToReady);
            scheduledNode.pop_back();
        }
    }
}

/*
 * Calculate the pred size based on the dependency information
 */
void RegPressureSchedule::CalculatePredSize(DepNode &node)
{
    constexpr uint32 emptyPredsSize = 0;
    node.SetValidPredsSize(emptyPredsSize);
    for (auto pred : node.GetPreds()) {
        DepNode &from = pred->GetFrom();
        if (!partialSet.empty() && (partialSet.find(&from) == partialSet.end())) {
            continue;
        } else {
            node.IncreaseValidPredsSize();
        }
    }
}

void RegPressureSchedule::InitBruteForceScheduling(MapleVector<DepNode *> &nodes)
{
    /* Calculate the pred size of each node */
    for (auto node : nodes) {
        CalculatePredSize(*node);
    }
    readyList.clear();
    optimisticScheduledNodes.clear();
    scheduledNode.clear();
    DepNode *firstNode = nodes.front();
    firstNode->SetState(kReady);
    readyList.emplace_back(firstNode);
}

/*
 * Emit the pre-scheduling output based on the new register pressure
 */
void RegPressureSchedule::EmitSchedulingSeries(MapleVector<DepNode *> &nodes)
{
#ifdef PRESCHED_DEBUG
    ReportScheduleOutput();
#endif
    if (originalPressure <= scheduledPressure) {
        /* Restore the original series */
        nodes.clear();
        for (auto node : originalNodeSeries) {
            nodes.emplace_back(node);
        }
    }
}

/*
 * ------------- Schedule function ----------
 * calculate and mark each insn id, each BB's firstLoc and lastLoc.
 */
void Schedule::InitIDAndLoc()
{
    uint32 id = 0;
    FOR_ALL_BB(bb, &cgFunc) {
        bb->SetLastLoc(bb->GetPrev() ? bb->GetPrev()->GetLastLoc() : nullptr);
        FOR_BB_INSNS(insn, bb) {
            insn->SetId(id++);
#if DEBUG
            insn->AppendComment(" Insn id: " + std::to_string(insn->GetId()));
#endif
            if (insn->IsImmaterialInsn() && !insn->IsComment()) {
                bb->SetLastLoc(insn);
            } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) {
                bb->SetFirstLoc(*bb->GetLastLoc());
            }
        }
    }
}

/* === new pm === */
bool CgPreScheduling::PhaseRun(maplebe::CGFunc &f)
{
    if (f.HasAsm()) {
        return true;
    }
    if (LIST_SCHED_DUMP_NEWPM) {
        LogInfo::MapleLogger() << "Before CgDoPreScheduling : " << f.GetName() << "\n";
        DotGenerator::GenerateDot("preschedule", f, f.GetMirModule(), true);
    }
    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
    /* reset the live analysis result container. */
    DEBUG_ASSERT(live != nullptr, "nullptr check");
    live->ResetLiveSet();

    Schedule *schedule = nullptr;
#if TARGAARCH64 || TARGRISCV64
    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
#if TARGARM32
    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
    schedule->ListScheduling(true);
    live->ClearInOutDataInfo();

    return true;
}

void CgPreScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
{
    aDep.AddRequired<CgLiveAnalysis>();
    aDep.PreservedAllExcept<CgLiveAnalysis>();
}
MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreScheduling, prescheduling)

bool CgScheduling::PhaseRun(maplebe::CGFunc &f)
{
    if (f.HasAsm()) {
        return true;
    }
    if (LIST_SCHED_DUMP_NEWPM) {
        LogInfo::MapleLogger() << "Before CgDoScheduling : " << f.GetName() << "\n";
        DotGenerator::GenerateDot("scheduling", f, f.GetMirModule(), true);
    }
    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
    /* reset the live analysis result container. */
    DEBUG_ASSERT(live != nullptr, "nullptr check");
    live->ResetLiveSet();

    Schedule *schedule = nullptr;
#if TARGAARCH64 || TARGRISCV64
    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
#if TARGARM32
    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
#endif
    schedule->ListScheduling(false);
    live->ClearInOutDataInfo();

    return true;
}

void CgScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
{
    aDep.AddRequired<CgLiveAnalysis>();
    aDep.PreservedAllExcept<CgLiveAnalysis>();
}
MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgScheduling, scheduling)
} /* namespace maplebe */