/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_offset_adjust.h"
#include "aarch64_cgfunc.h"
#include "aarch64_cg.h"

namespace maplebe {
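// Entry point of the FP/LR offset-adjustment phase: walk every machine instruction of the
// function and rewrite operands whose offsets are still layout-dependent ("vary") or that
// reference the virtual frame pointer. With STKLAY_DEBUG defined, the computed stack-layout
// segments are also dumped for inspection.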
void AArch64FPLROffsetAdjustment::Run()
{
    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc);
    FOR_ALL_BB(bb, aarchCGFunc)
    {
        FOR_BB_INSNS_SAFE(insn, bb, ninsn)
        {  // AdjustmentOffsetForOpnd may replace curInsn
            if (!insn->IsMachineInstruction()) {
                continue;
            }
            AdjustmentOffsetForOpnd(*insn);
        }
    }
#ifdef STKLAY_DEBUG
    AArch64MemLayout *aarch64memlayout = static_cast<AArch64MemLayout *>(cgFunc->GetMemlayout());
    LogInfo::MapleLogger() << "--------layout of " << cgFunc->GetName() << "-------------"
                           << "\n";
    LogInfo::MapleLogger() << "stkpassed: " << aarch64memlayout->GetSegArgsStkPassed().GetSize() << "\n";
    LogInfo::MapleLogger() << "real framesize: " << aarch64memlayout->RealStackFrameSize() << "\n";
    LogInfo::MapleLogger() << "gr save: " << aarch64memlayout->GetSizeOfGRSaveArea() << "\n";
    LogInfo::MapleLogger() << "vr save: " << aarch64memlayout->GetSizeOfVRSaveArea() << "\n";
    LogInfo::MapleLogger() << "calleesave (includes fp lr): "
                           << static_cast<AArch64CGFunc *>(cgFunc)->SizeOfCalleeSaved() << "\n";
    LogInfo::MapleLogger() << "regspill: " << aarch64memlayout->GetSizeOfSpillReg() << "\n";
    LogInfo::MapleLogger() << "ref local: " << aarch64memlayout->GetSizeOfRefLocals() << "\n";
    LogInfo::MapleLogger() << "local: " << aarch64memlayout->GetSizeOfLocals() << "\n";
    LogInfo::MapleLogger() << "regpass: " << aarch64memlayout->GetSegArgsRegPassed().GetSize() << "\n";
    LogInfo::MapleLogger() << "stkpass: " << aarch64memlayout->GetSegArgsToStkPass().GetSize() << "\n";
    LogInfo::MapleLogger() << "-------------------------------------------------"
                           << "\n";
#endif
}

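// Rewrite a single instruction: vary/virtual-FP register operands are replaced with the
// concrete stack-base register, memory and immediate operands marked kUnAdjustVary are
// rebased onto the final frame layout, and, when SP serves as the base instead of FP, the
// offset is further corrected by the outgoing-argument area. The adjusted instruction is
// verified at the end and split if it is no longer encodable.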
void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn) const
{
    bool isLmbc = (aarchCGFunc->GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc);
    AArch64reg stackBaseReg = isLmbc ? R29 : (aarchCGFunc->UseFP() ? R29 : RSP);
    uint32 opndNum = insn.GetOperandSize();
    bool replaceFP = false;
    for (uint32 i = 0; i < opndNum; ++i) {
        Operand &opnd = insn.GetOperand(i);
        if (opnd.IsRegister()) {
            auto &regOpnd = static_cast<RegOperand &>(opnd);
            if (regOpnd.IsOfVary()) {
                insn.SetOperand(i, aarchCGFunc->GetOrCreateStackBaseRegOperand());
                regOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand();
            }
            if (regOpnd.GetRegisterNumber() == RFP) {
                insn.SetOperand(i,
                                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt));
                replaceFP = true;
            }
        } else if (opnd.IsMemoryAccessOperand()) {
            AdjustMemBaseReg(insn, i, replaceFP);
            AdjustMemOfstVary(insn, i);
        } else if (opnd.IsIntImmediate()) {
            AdjustmentOffsetForImmOpnd(insn, i);
        }
    }
    if (replaceFP && !aarchCGFunc->UseFP()) {
        AdjustmentStackPointer(insn);
    }
    if (!VERIFY_INSN(&insn)) {  // split insn
        SPLIT_INSN(&insn, aarchCGFunc);
    }
}

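// Clone the memory operand at index i, replace a vary or RFP base register with the concrete
// stack-base register, flag SP/FP-based accesses as stack memory, and install the cloned
// operand back into the instruction. replaceFP records whether RFP was substituted so the
// caller can re-anchor the offset onto SP afterwards.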
void AArch64FPLROffsetAdjustment::AdjustMemBaseReg(Insn &insn, uint32 i, bool &replaceFP) const
{
    Operand &opnd = insn.GetOperand(i);
    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
    MemOperand *newMemOpnd = currMemOpnd.Clone(*aarchCGFunc->GetMemoryPool());
    CHECK_NULL_FATAL(newMemOpnd);
    if (newMemOpnd->GetBaseRegister() != nullptr) {
        if (newMemOpnd->GetBaseRegister()->IsOfVary()) {
            newMemOpnd->SetBaseRegister(static_cast<RegOperand &>(aarchCGFunc->GetOrCreateStackBaseRegOperand()));
        }
        RegOperand *memBaseReg = newMemOpnd->GetBaseRegister();
        if (memBaseReg->GetRegisterNumber() == RFP) {
            RegOperand &newBaseOpnd =
                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt);
            newMemOpnd->SetBaseRegister(newBaseOpnd);
            replaceFP = true;
        }
    }
    if (newMemOpnd->GetBaseRegister() != nullptr && (newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RFP ||
                                                     newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RSP)) {
        newMemOpnd->SetStackMem(true);
    }
    insn.SetOperand(i, *newMemOpnd);
}

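// For a base+offset-immediate (kAddrModeBOi) memory operand whose offset is still marked
// kUnAdjustVary, fold the concrete frame-layout delta (real frame size minus the
// outgoing-argument, cold-to-stack and reserved-slot areas) into the offset and mark it
// kAdjustVary.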
void AArch64FPLROffsetAdjustment::AdjustMemOfstVary(Insn &insn, uint32 i) const
{
    // Note: SplitInsn invalidates the current insn, but later adjustment steps still operate on it,
    //       which would lead to hard-to-diagnose errors. The split is therefore deferred until all
    //       adjustment steps have completed (see AdjustmentOffsetForOpnd).
    Operand &opnd = insn.GetOperand(i);
    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
    if (currMemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) {
        return;
    }
    OfstOperand *ofstOpnd = currMemOpnd.GetOffsetImmediate();
    CHECK_NULL_FATAL(ofstOpnd);
    if (ofstOpnd->GetVary() == kUnAdjustVary) {
        MemLayout *memLayout = aarchCGFunc->GetMemlayout();
        ofstOpnd->AdjustOffset(static_cast<int32>(static_cast<AArch64MemLayout *>(memLayout)->RealStackFrameSize() -
                                                  memLayout->SizeOfArgsToStackPass() -
                                                  static_cast<AArch64MemLayout *>(memLayout)->GetSizeOfColdToStk() -
                                                  cgFunc->GetFunction().GetFrameReseverdSlot()));
        ofstOpnd->SetVary(kAdjustVary);
    }
}

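// Rebase an integer immediate operand that is still marked kUnAdjustVary by the same
// frame-layout delta. For sub-with-immediate instructions the delta is subtracted instead,
// the value is negated if it becomes negative, and the opcode is swapped via
// A64ConstProp::GetReversalMOP so the instruction remains valid.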
void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index) const
{
    // Note: SplitInsn invalidates the current insn, but later adjustment steps still operate on it,
    //       which would lead to hard-to-diagnose errors. The split is therefore deferred until all
    //       adjustment steps have completed (see AdjustmentOffsetForOpnd).
    auto &immOpnd = static_cast<ImmOperand &>(insn.GetOperand(index));
    auto *memLayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
    if (immOpnd.GetVary() == kUnAdjustVary) {
        int64 ofst = static_cast<int64>(
            memLayout->RealStackFrameSize() -
            static_cast<uint32>(memLayout->SizeOfArgsToStackPass() + memLayout->GetSizeOfColdToStk()) -
            cgFunc->GetFunction().GetFrameReseverdSlot());
        if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) {
            immOpnd.SetValue(immOpnd.GetValue() - ofst);
            if (immOpnd.GetValue() < 0) {
                immOpnd.Negate();
            }
            insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]);
        } else {
            immOpnd.Add(ofst);
        }
        immOpnd.SetVary(kAdjustVary);
    }
}

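// When SP is used as the stack base instead of FP, FP-relative offsets must additionally skip
// the outgoing-argument area (SizeOfArgsToStackPass plus the reserved frame slot). Loads and
// stores get the extra offset folded into their memory operand; for the add/sub immediate
// forms expected here the immediate is patched, the addrri24 form being routed through a
// scratch register (R16). Any other opcode is a fatal error.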
void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const
{
    // Note: SplitInsn invalidates the current insn, but later adjustment steps still operate on it,
    //       which would lead to hard-to-diagnose errors. The split is therefore deferred until all
    //       adjustment steps have completed (see AdjustmentOffsetForOpnd).
    auto *aarch64memlayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
    uint32 offset = aarch64memlayout->SizeOfArgsToStackPass() + cgFunc->GetFunction().GetFrameReseverdSlot();
    if (offset == 0) {
        return;
    }
    if (insn.IsLoad() || insn.IsStore()) {
        auto *memOpnd = static_cast<MemOperand *>(insn.GetMemOpnd());
        CHECK_NULL_FATAL(memOpnd);
        DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "Unexpected, need check");
        CHECK_FATAL(memOpnd->IsIntactIndexed(), "not supported yet");
        ImmOperand *ofstOpnd = memOpnd->GetOffsetOperand();
        CHECK_NULL_FATAL(ofstOpnd);
        ImmOperand *newOfstOpnd =
            &aarchCGFunc->GetOrCreateOfstOpnd(static_cast<uint64>(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize());
        memOpnd->SetOffsetOperand(*newOfstOpnd);
    } else {
        switch (insn.GetMachineOpcode()) {
            case MOP_waddrri12:
            case MOP_xaddrri12: {
                DEBUG_ASSERT(static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
                             "regNumber should be changed in AdjustmentOffsetForOpnd");
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset);
                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
                break;
            }
            case MOP_waddrri24:
            case MOP_xaddrri24: {
                DEBUG_ASSERT(static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
                             "regNumber should be changed in AdjustmentOffsetForOpnd");
                RegOperand &tempReg = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
                ImmOperand &offsetReg = aarchCGFunc->CreateImmOperand(offset, k64BitSize, false);
                aarchCGFunc->SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false,
                                                insn);
                insn.SetOperand(kInsnSecondOpnd, tempReg);
                break;
            }
            case MOP_wsubrri12:
            case MOP_xsubrri12: {
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() - offset);
                if (newAddImmOpnd->GetValue() < 0) {
                    newAddImmOpnd->Negate();
                }
                insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]);
                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
                break;
            }
            case MOP_waddsrri12:
            case MOP_xaddsrri12: {
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnFourthOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset);
                insn.SetOperand(kInsnFourthOpnd, *newAddImmOpnd);
                break;
            }
            default: {
                // Only a few special insns are expected to replace FP here.
                insn.Dump();
                CHECK_FATAL(false, "Unexpected offset adjustment insn");
            }
        }
    }
}
} /* namespace maplebe */