/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_offset_adjust.h"
#include "aarch64_cg.h"

namespace maplebe {

static MOperator GetReversalMOP(MOperator arithMop)
{
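    // Map an add/sub-immediate machine opcode to the opposite operation; the
    // offset-adjustment code below flips add <-> sub after folding the frame
    // offset into the immediate operand.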
    switch (arithMop) {
        case MOP_waddrri12:
            return MOP_wsubrri12;
        case MOP_xaddrri12:
            return MOP_xsubrri12;
        case MOP_xsubrri12:
            return MOP_xaddrri12;
        case MOP_wsubrri12:
            return MOP_waddrri12;
        default:
            CHECK_FATAL(false, "NYI");
            break;
    }
    return MOP_undef;
}

void AArch64FPLROffsetAdjustment::Run()
{
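    // Walk every machine instruction and rewrite placeholder (vary) stack-base
    // registers/offsets and FP references now that the frame layout is finalized.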
    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc);
    FOR_ALL_BB(bb, aarchCGFunc)
    {
        FOR_BB_INSNS_SAFE(insn, bb, ninsn)
        { // AdjustmentOffsetForOpnd may replace insn
            if (!insn->IsMachineInstruction()) {
                continue;
            }
            AdjustmentOffsetForOpnd(*insn);
        }
    }
#ifdef STKLAY_DEBUG
    AArch64MemLayout *aarch64memlayout = static_cast<AArch64MemLayout *>(cgFunc->GetMemlayout());
    LogInfo::MapleLogger() << "--------layout of " << cgFunc->GetName() << "-------------"
                           << "\n";
    LogInfo::MapleLogger() << "stkpassed: " << aarch64memlayout->GetSegArgsStkPassed().GetSize() << "\n";
    LogInfo::MapleLogger() << "real framesize: " << aarch64memlayout->RealStackFrameSize() << "\n";
    LogInfo::MapleLogger() << "gr save: " << aarch64memlayout->GetSizeOfGRSaveArea() << "\n";
    LogInfo::MapleLogger() << "vr save: " << aarch64memlayout->GetSizeOfVRSaveArea() << "\n";
    LogInfo::MapleLogger() << "calleesave (includes fp lr): "
                           << static_cast<AArch64CGFunc *>(cgFunc)->SizeOfCalleeSaved() << "\n";
    LogInfo::MapleLogger() << "regspill: " << aarch64memlayout->GetSizeOfSpillReg() << "\n";
    LogInfo::MapleLogger() << "ref local: " << aarch64memlayout->GetSizeOfRefLocals() << "\n";
    LogInfo::MapleLogger() << "local: " << aarch64memlayout->GetSizeOfLocals() << "\n";
    LogInfo::MapleLogger() << "regpass: " << aarch64memlayout->GetSegArgsRegPassed().GetSize() << "\n";
    LogInfo::MapleLogger() << "stkpass: " << aarch64memlayout->GetSegArgsToStkPass().GetSize() << "\n";
    LogInfo::MapleLogger() << "-------------------------------------------------"
                           << "\n";
#endif
}

void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn) const
{
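    // Rewrite each operand of insn: vary registers become the concrete stack base,
    // FP references become the selected base register, and vary offsets/immediates
    // absorb the frame-size adjustment. If the adjusted insn no longer verifies
    // (e.g. an immediate went out of range) it is split at the end.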
    bool isLmbc = (aarchCGFunc->GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc);
    AArch64reg stackBaseReg = isLmbc ? R29 : (aarchCGFunc->UseFP() ? R29 : RSP);
    uint32 opndNum = insn.GetOperandSize();
    bool replaceFP = false;
    for (uint32 i = 0; i < opndNum; ++i) {
        Operand &opnd = insn.GetOperand(i);
        if (opnd.IsRegister()) {
            auto &regOpnd = static_cast<RegOperand &>(opnd);
            if (regOpnd.IsOfVary()) {
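                // Replace the placeholder stack-base register, and also update the
                // cached reference so the RFP check below sees the substituted register.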
                insn.SetOperand(i, aarchCGFunc->GetOrCreateStackBaseRegOperand());
                regOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand();
            }
            if (regOpnd.GetRegisterNumber() == RFP) {
                insn.SetOperand(i,
                                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt));
                replaceFP = true;
            }
        } else if (opnd.IsMemoryAccessOperand()) {
            AdjustMemBaseReg(insn, i, replaceFP);
            AdjustMemOfstVary(insn, i);
        } else if (opnd.IsIntImmediate()) {
            AdjustmentOffsetForImmOpnd(insn, i);
        }
    }
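    // FP was rewritten to SP above; AdjustmentStackPointer folds in the extra
    // SP-relative offset (outgoing-args area plus the reserved frame slot).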
    if (replaceFP && !aarchCGFunc->UseFP()) {
        AdjustmentStackPointer(insn);
    }
    if (!VERIFY_INSN(&insn)) { // split insn
        SPLIT_INSN(&insn, aarchCGFunc);
    }
}

void AArch64FPLROffsetAdjustment::AdjustMemBaseReg(Insn &insn, uint32 i, bool &replaceFP) const
{
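    // Operate on a clone of the memory operand (memory operands may be shared between
    // insns) and retarget its base register from the vary placeholder or FP to the
    // concrete stack base.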
    Operand &opnd = insn.GetOperand(i);
    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
    MemOperand *newMemOpnd = currMemOpnd.Clone(*aarchCGFunc->GetMemoryPool());
    CHECK_NULL_FATAL(newMemOpnd);
    if (newMemOpnd->GetBaseRegister() != nullptr) {
        if (newMemOpnd->GetBaseRegister()->IsOfVary()) {
            newMemOpnd->SetBaseRegister(static_cast<RegOperand &>(aarchCGFunc->GetOrCreateStackBaseRegOperand()));
        }
        RegOperand *memBaseReg = newMemOpnd->GetBaseRegister();
        if (memBaseReg->GetRegisterNumber() == RFP) {
            RegOperand &newBaseOpnd =
                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt);
            newMemOpnd->SetBaseRegister(newBaseOpnd);
            replaceFP = true;
        }
    }
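    // A base register of FP or SP means this is a stack access; record that on the operand.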
    if (newMemOpnd->GetBaseRegister() != nullptr && (newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RFP ||
                                                     newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RSP)) {
        newMemOpnd->SetStackMem(true);
    }
    insn.SetOperand(i, *newMemOpnd);
}

void AArch64FPLROffsetAdjustment::AdjustMemOfstVary(Insn &insn, uint32 i) const
{
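    // Fold the finalized frame offset into a base+offset-immediate (BOi) memory operand
    // whose offset is still marked as unadjusted (kUnAdjustVary).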
    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated by
    // later steps, so splitting here would cause subtle errors. A single unified split is
    // performed after all adjustments are complete (see AdjustmentOffsetForOpnd).
    Operand &opnd = insn.GetOperand(i);
    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
    if (currMemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) {
        return;
    }
    OfstOperand *ofstOpnd = currMemOpnd.GetOffsetImmediate();
    CHECK_NULL_FATAL(ofstOpnd);
    if (ofstOpnd->GetVary() == kUnAdjustVary) {
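        // The vary offset was recorded before the final frame size was known; bump it by
        // the real frame size minus the outgoing-args area, the cold-to-stack section and
        // the reserved frame slot.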
        MemLayout *memLayout = aarchCGFunc->GetMemlayout();
        ofstOpnd->AdjustOffset(static_cast<int32>(static_cast<AArch64MemLayout *>(memLayout)->RealStackFrameSize() -
                                                  memLayout->SizeOfArgsToStackPass() -
                                                  static_cast<AArch64MemLayout *>(memLayout)->GetSizeOfColdToStk() -
                                                  cgFunc->GetFunction().GetFrameReseverdSlot()));
        ofstOpnd->SetVary(kAdjustVary);
    }
}

void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index) const
{
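    // Unadjusted integer immediates (typically the immediate of an add/sub that computes
    // a stack address) absorb the same frame-size delta.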
    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated by
    // later steps, so splitting here would cause subtle errors. A single unified split is
    // performed after all adjustments are complete (see AdjustmentOffsetForOpnd).
    auto &immOpnd = static_cast<ImmOperand &>(insn.GetOperand(index));
    auto *memLayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
    if (immOpnd.GetVary() == kUnAdjustVary) {
        int64 ofst = static_cast<int64>(
            memLayout->RealStackFrameSize() -
            static_cast<uint32>(memLayout->SizeOfArgsToStackPass() + memLayout->GetSizeOfColdToStk()) -
            cgFunc->GetFunction().GetFrameReseverdSlot());
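        // Sub-immediates absorb the delta with the opposite sign: the value is negated if
        // it goes below zero, and the opcode is replaced with its arithmetic reversal.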
        if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) {
            immOpnd.SetValue(immOpnd.GetValue() - ofst);
            if (immOpnd.GetValue() < 0) {
                immOpnd.Negate();
            }
            insn.SetMOP(AArch64CG::kMd[GetReversalMOP(insn.GetMachineOpcode())]);
        } else {
            immOpnd.Add(ofst);
        }
        immOpnd.SetVary(kAdjustVary);
    }
}

void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const
{
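    // The insn addressed the frame through FP, but FP has been replaced by SP; add the
    // outgoing-args area plus the reserved slot so the SP-relative form reaches the same
    // location.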
    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated by
    // later steps, so splitting here would cause subtle errors. A single unified split is
    // performed after all adjustments are complete (see AdjustmentOffsetForOpnd).
    auto *aarch64memlayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
    uint32 offset = aarch64memlayout->SizeOfArgsToStackPass() + cgFunc->GetFunction().GetFrameReseverdSlot();
    if (offset == 0) {
        return;
    }
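    // Loads/stores take the compensation in their memory offset; arithmetic forms fold it
    // into their immediate operand (using a scratch register when the immediate cannot hold it).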
    if (insn.IsLoad() || insn.IsStore()) {
        auto *memOpnd = static_cast<MemOperand *>(insn.GetMemOpnd());
        CHECK_NULL_FATAL(memOpnd);
        DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "Unexpected, need check");
        CHECK_FATAL(memOpnd->IsIntactIndexed(), "unsupported yet");
        ImmOperand *ofstOpnd = memOpnd->GetOffsetOperand();
        CHECK_NULL_FATAL(ofstOpnd);
        ImmOperand *newOfstOpnd =
            &aarchCGFunc->GetOrCreateOfstOpnd(static_cast<uint64>(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize());
        memOpnd->SetOffsetOperand(*newOfstOpnd);
    } else {
        switch (insn.GetMachineOpcode()) {
            case MOP_waddrri12:
            case MOP_xaddrri12: {
                DEBUG_ASSERT(static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
                             "regNumber should be changed in AdjustmentOffsetForOpnd");
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset);
                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
                break;
            }
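            // The shifted (LSL #12) add-immediate form cannot simply absorb the extra
            // offset, so compute base + offset into the scratch register R16 and use it
            // as the new source operand.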
            case MOP_waddrri24:
            case MOP_xaddrri24: {
                DEBUG_ASSERT(static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
                             "regNumber should be changed in AdjustmentOffsetForOpnd");
                RegOperand &tempReg = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
                ImmOperand &offsetReg = aarchCGFunc->CreateImmOperand(offset, k64BitSize, false);
                aarchCGFunc->SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false,
                                                insn);
                insn.SetOperand(kInsnSecondOpnd, tempReg);
                break;
            }
            case MOP_wsubrri12:
            case MOP_xsubrri12: {
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() - offset);
                if (newAddImmOpnd->GetValue() < 0) {
                    newAddImmOpnd->Negate();
                }
                insn.SetMOP(AArch64CG::kMd[GetReversalMOP(insn.GetMachineOpcode())]);
                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
                break;
            }
            case MOP_waddsrri12:
            case MOP_xaddsrri12: {
                auto *newAddImmOpnd = static_cast<ImmOperand *>(
                    static_cast<ImmOperand &>(insn.GetOperand(kInsnFourthOpnd)).Clone(*cgFunc->GetMemoryPool()));
                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset);
                insn.SetOperand(kInsnFourthOpnd, *newAddImmOpnd);
                break;
            }
            default: {
                // Only a few special insns are expected to reference FP here; anything
                // else is an unexpected offset-adjustment case.
                insn.Dump();
                CHECK_FATAL(false, "Unexpected offset adjustment insn");
            }
        }
    }
}
} /* namespace maplebe */