/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16 #include "x64_memlayout.h"
17 #include "x64_cgfunc.h"
18 #include "becommon.h"
19 #include "mir_nodes.h"
20 #include "x64_call_conv.h"
21 #include "cg.h"
22
23 namespace maplebe {
24 using namespace maple;
25
ComputeStackSpaceRequirementForCall(StmtNode & stmt,int32 & aggCopySize,bool isIcall)26 uint32 X64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall)
27 {
28 /* instantiate a parm locator */
29 X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(stmt));
30 uint32 sizeOfArgsToStkPass = 0;
31 size_t i = 0;
32 /* An indirect call's first operand is the invocation target */
33 if (isIcall) {
34 ++i;
35 }
36
37 aggCopySize = 0;
38 for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) {
39 BaseNode *opnd = stmt.Opnd(i);
40 MIRType *ty = nullptr;
41 if (opnd->GetPrimType() != PTY_agg) {
42 ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(opnd->GetPrimType())];
43 } else {
44 Opcode opndOpcode = opnd->GetOpCode();
45 DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread");
46 if (opndOpcode == OP_dread) {
47 DreadNode *dread = static_cast<DreadNode *>(opnd);
48 CHECK_NULL_FATAL(be.GetMIRModule().CurFunction());
49 MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx());
50 ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx());
51 if (dread->GetFieldID() != 0) {
52 DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass ||
53 ty->GetKind() == kTypeUnion,
54 "expect struct or class");
55 if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) {
56 ty = static_cast<MIRStructType *>(ty)->GetFieldType(dread->GetFieldID());
57 } else {
58 ty = static_cast<MIRClassType *>(ty)->GetFieldType(dread->GetFieldID());
59 }
60 }
61 } else {
62 /* OP_iread */
63 IreadNode *iread = static_cast<IreadNode *>(opnd);
64 ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx());
65 DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer");
66 ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<MIRPtrType *>(ty)->GetPointedTyIdx());
67 if (iread->GetFieldID() != 0) {
68 DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass ||
69 ty->GetKind() == kTypeUnion,
70 "expect struct or class");
71 if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) {
72 ty = static_cast<MIRStructType *>(ty)->GetFieldType(iread->GetFieldID());
73 } else {
74 ty = static_cast<MIRClassType *>(ty)->GetFieldType(iread->GetFieldID());
75 }
76 }
77 }
78 }
79 CCLocInfo ploc;
80 aggCopySize += parmLocator.LocateNextParm(*ty, ploc);
81 if (ploc.reg0 != 0) {
82 continue; /* passed in register, so no effect on actual area */
83 }
84 sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize());
85 }
86
87 return sizeOfArgsToStkPass;
88 }
89
SetSizeAlignForTypeIdx(uint32 typeIdx,uint32 & size,uint32 & align) const90 void X64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const
91 {
92 align = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx)->GetAlign();
93 size = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx)->GetSize();
94 }
95
LayoutVarargParams()96 void X64MemLayout::LayoutVarargParams()
97 {
98 uint32 nIntRegs = 0;
99 uint32 nFpRegs = 0;
100 X64CallConvImpl parmlocator(be);
101 CCLocInfo ploc;
102 MIRFunction *func = mirFunction;
103 if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) {
104 for (uint32 i = 0; i < func->GetFormalCount(); i++) {
105 if (i == 0) {
106 if (be.HasFuncReturnType(*func)) {
107 TyIdx tidx = be.GetFuncReturnType(*func);
108 if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(tidx.GetIdx())->GetSize() <= k16ByteSize) {
109 continue;
110 }
111 }
112 }
113 MIRType *ty = func->GetNthParamType(i);
114 parmlocator.LocateNextParm(*ty, ploc, i == 0, func);
115 if (ploc.reg0 != kRinvalid) {
116 /* The range here is R0 to R15. However, not all registers in the range are parameter registers.
117 * If necessary later, you can add parameter register checks. */
118 if (ploc.reg0 >= R0 && ploc.reg0 <= R15) {
119 nIntRegs++;
120 } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) {
121 nFpRegs++;
122 }
123 }
124 if (ploc.reg1 != kRinvalid) {
125 if (ploc.reg1 >= R0 && ploc.reg1 <= R15) {
126 nIntRegs++;
127 } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) {
128 nFpRegs++;
129 }
130 }
131 if (ploc.reg2 != kRinvalid) {
132 if (ploc.reg2 >= R0 && ploc.reg2 <= R15) {
133 nIntRegs++;
134 } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) {
135 nFpRegs++;
136 }
137 }
138 if (ploc.reg3 != kRinvalid) {
139 if (ploc.reg3 >= R0 && ploc.reg3 <= R15) {
140 nIntRegs++;
141 } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) {
142 nFpRegs++;
143 }
144 }
145 }
146
147 SetSizeOfGRSaveArea((k6BitSize - nIntRegs) * GetPointerSize());
148 SetSizeOfVRSaveArea((k6BitSize - nFpRegs) * GetPointerSize() * k2ByteSize);
149 }
150 }
151
/* Assign a stack-frame location to every formal parameter of the current
 * function: register-passed formals that live in memory go into the
 * "args reg passed" segment, stack-passed formals into the
 * "args stk passed" segment. Segment sizes grow as offsets are assigned.
 */
void X64MemLayout::LayoutFormalParams()
{
    X64CallConvImpl parmLocator(be);
    CCLocInfo ploc;
    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
        MIRSymbol *sym = mirFunction->GetFormal(i);
        uint32 stIndex = sym->GetStIndex();
        X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>();
        SetSymAllocInfo(stIndex, *symLoc);
        if (i == 0) {
            // The function name here is not appropriate, it should be to determine
            // whether the function returns a structure less than 16 bytes. At this
            // time, the first parameter is a structure occupant, which has no
            // practical significance.
            if (be.HasFuncReturnType(*mirFunction)) {
                symLoc->SetMemSegment(GetSegArgsRegPassed());
                symLoc->SetOffset(GetSegArgsRegPassed().GetSize());
                continue;
            }
        }

        MIRType *ty = mirFunction->GetNthParamType(i);
        uint32 ptyIdx = ty->GetTypeIndex();
        parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction);
        uint32 size = 0;
        uint32 align = 0;
        if (ploc.reg0 != kRinvalid) {
            /* passed in a register; pregs need no home slot, named symbols do */
            if (!sym->IsPreg()) {
                SetSizeAlignForTypeIdx(ptyIdx, size, align);
                symLoc->SetMemSegment(GetSegArgsRegPassed());
                if (ty->GetPrimType() == PTY_agg &&
                    GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx)->GetSize() > k4ByteSize) {
                    /* struct param aligned on 8 byte boundary unless it is small enough */
                    align = GetPointerSize();
                }
                /* align the segment, record the offset, then grow by the param's size */
                segArgsRegPassed.SetSize(static_cast<uint32>(RoundUp(segArgsRegPassed.GetSize(), align)));
                symLoc->SetOffset(segArgsRegPassed.GetSize());
                segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size);
            }
        } else {
            /* passed on the stack: slot is rounded up to pointer size afterwards */
            SetSizeAlignForTypeIdx(ptyIdx, size, align);
            symLoc->SetMemSegment(GetSegArgsStkPassed());
            segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), align)));
            symLoc->SetOffset(segArgsStkPassed.GetSize());
            segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size);
            segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize())));
        }
    }
}
201
LayoutLocalVariables()202 void X64MemLayout::LayoutLocalVariables()
203 {
204 uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize();
205 for (uint32 i = 0; i < symTabSize; ++i) {
206 MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i);
207 if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) {
208 continue;
209 }
210 uint32 stIndex = sym->GetStIndex();
211 TyIdx tyIdx = sym->GetTyIdx();
212 X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>();
213 SetSymAllocInfo(stIndex, *symLoc);
214 CHECK_FATAL(!symLoc->IsRegister(), "expect not register");
215
216 symLoc->SetMemSegment(segLocals);
217 MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
218 uint32 align = ty->GetAlign();
219 if (ty->GetPrimType() == PTY_agg && align < k8BitSize) {
220 segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), k8BitSize)));
221 } else {
222 segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), align)));
223 }
224 symLoc->SetOffset(segLocals.GetSize());
225 segLocals.SetSize(segLocals.GetSize() + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize());
226 }
227 }
228
AssignSpillLocationsToPseudoRegisters()229 void X64MemLayout::AssignSpillLocationsToPseudoRegisters()
230 {
231 MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab();
232
233 /* BUG: n_regs include index 0 which is not a valid preg index. */
234 size_t nRegs = pregTab->Size();
235 spillLocTable.resize(nRegs);
236 for (size_t i = 1; i < nRegs; ++i) {
237 PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType();
238 X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>();
239 symLoc->SetMemSegment(segLocals);
240 segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType)));
241 symLoc->SetOffset(segLocals.GetSize());
242 MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType];
243 segLocals.SetSize(segLocals.GetSize() + mirTy->GetSize());
244 spillLocTable[i] = symLoc;
245 }
246 }
247
LayoutReturnRef(int32 & structCopySize,int32 & maxParmStackSize)248 void X64MemLayout::LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize)
249 {
250 segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize));
251 maxParmStackSize = static_cast<int32>(segArgsToStkPass.GetSize());
252 if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
253 AssignSpillLocationsToPseudoRegisters();
254 }
255 segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), GetPointerSize())));
256 }
257
LayoutStackFrame(int32 & structCopySize,int32 & maxParmStackSize)258 void X64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize)
259 {
260 LayoutVarargParams();
261 LayoutFormalParams();
262
263 // Need to be aligned ?
264 segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize()));
265 segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize()));
266
267 /* allocate the local variables in the stack */
268 LayoutLocalVariables();
269 LayoutReturnRef(structCopySize, maxParmStackSize);
270
271 // Need to adapt to the cc interface.
272 structCopySize = 0;
273 // Scenes with more than 6 parameters are not yet enabled.
274 maxParmStackSize = 0;
275
276 cgFunc->SetUseFP(cgFunc->UseFP() || static_cast<int32>(StackFrameSize()) > kMaxPimm32);
277 }
278
StackFrameSize() const279 uint64 X64MemLayout::StackFrameSize() const
280 {
281 uint64 total = Locals().GetSize() + segArgsRegPassed.GetSize() + segArgsToStkPass.GetSize() +
282 segGrSaveArea.GetSize() + segVrSaveArea.GetSize() + segSpillReg.GetSize() +
283 cgFunc->GetFunction().GetFrameReseverdSlot(); // frame reserved slot
284 return RoundUp(total, stackPtrAlignment);
285 }
286
GetGRSaveAreaBaseLoc()287 int32 X64MemLayout::GetGRSaveAreaBaseLoc()
288 {
289 int32 total = static_cast<int32>(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment));
290 return total;
291 }
292
GetVRSaveAreaBaseLoc()293 int32 X64MemLayout::GetVRSaveAreaBaseLoc()
294 {
295 int32 total = static_cast<int32>(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment) +
296 RoundUp(GetSizeOfVRSaveArea(), stackPtrAlignment));
297 return total;
298 }
299 } /* namespace maplebe */
300