/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_memlayout.h"
#include "aarch64_cgfunc.h"
#include "becommon.h"
#include "mir_nodes.h"

namespace maplebe {
using namespace maple;

/*
 * Returns the stack space required by a call to pass the
 * actual arguments that cannot be passed through registers.
 */
uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall)
{
    /* instantiate a parm locator */
    CCImpl &parmLocator = *static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreateLocator(CCImpl::GetCallConvKind(stmt));
    uint32 sizeOfArgsToStkPass = 0;
    size_t i = 0;
    /* An indirect call's first operand is the invocation target */
    if (isIcall) {
        ++i;
    }

    if (std::strcmp(stmt.GetOpName(), "call") == 0) {
        CallNode *callNode = static_cast<CallNode *>(&stmt);
        MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
        CHECK_FATAL(fn != nullptr, "get MIRFunction failed");
        MIRSymbol *symbol = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false);
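        /* These MCC_Call*Native* runtime wrappers take the target native function as their first
         * operand, so skip it when locating the remaining arguments. */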
        if (symbol->GetName() == "MCC_CallFastNative" || symbol->GetName() == "MCC_CallFastNativeExt" ||
            symbol->GetName() == "MCC_CallSlowNative0" || symbol->GetName() == "MCC_CallSlowNative1" ||
            symbol->GetName() == "MCC_CallSlowNative2" || symbol->GetName() == "MCC_CallSlowNative3" ||
            symbol->GetName() == "MCC_CallSlowNative4" || symbol->GetName() == "MCC_CallSlowNative5" ||
            symbol->GetName() == "MCC_CallSlowNative6" || symbol->GetName() == "MCC_CallSlowNative7" ||
            symbol->GetName() == "MCC_CallSlowNative8" || symbol->GetName() == "MCC_CallSlowNativeExt") {
            ++i;
        }
    }

    aggCopySize = 0;
    for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) {
        BaseNode *opnd = stmt.Opnd(i);
        MIRType *ty = nullptr;
        if (opnd->GetPrimType() != PTY_agg) {
            ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(opnd->GetPrimType())];
        } else {
            Opcode opndOpcode = opnd->GetOpCode();
            DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread");
            if (opndOpcode == OP_dread) {
                DreadNode *dread = static_cast<DreadNode *>(opnd);
                MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx());
                ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx());
                if (dread->GetFieldID() != 0) {
                    DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass ||
                                     ty->GetKind() == kTypeUnion,
                                 "expect struct or class");
                    if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) {
                        ty = static_cast<MIRStructType *>(ty)->GetFieldType(dread->GetFieldID());
                    } else {
                        ty = static_cast<MIRClassType *>(ty)->GetFieldType(dread->GetFieldID());
                    }
                }
            } else {
                /* OP_iread */
                IreadNode *iread = static_cast<IreadNode *>(opnd);
                ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx());
                DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer");
                ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<MIRPtrType *>(ty)->GetPointedTyIdx());
                if (iread->GetFieldID() != 0) {
                    DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass ||
                                     ty->GetKind() == kTypeUnion,
                                 "expect struct or class");
                    if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) {
                        ty = static_cast<MIRStructType *>(ty)->GetFieldType(iread->GetFieldID());
                    } else {
                        ty = static_cast<MIRClassType *>(ty)->GetFieldType(iread->GetFieldID());
                    }
                }
            }
        }
        CCLocInfo ploc;
        aggCopySize += parmLocator.LocateNextParm(*ty, ploc);
        if (ploc.reg0 != 0) {
            continue; /* passed in register, so no effect on actual area */
        }
        sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize());
    }
    return sizeOfArgsToStkPass;
}

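/* Compute the size and alignment that a formal of type typeIdx occupies in the parameter area. */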
void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const
{
    if (be.GetTypeSize(typeIdx) > k16ByteSize) {
        /* An aggregate larger than 16 bytes is passed on the stack; the formal is just a pointer to the copy. */
        if (CGOptions::IsArm64ilp32()) {
            align = k8ByteSize;
            size = k8ByteSize;
        } else {
            align = GetPointerSize();
            size = GetPointerSize();
        }
    } else {
        align = be.GetTypeAlign(typeIdx);
        size = static_cast<uint32>(be.GetTypeSize(typeIdx));
    }
}

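/*
 * Append a symbol of type typeIdx to the given segment: align the segment, record the
 * symbol's offset in symbolAlloc, then grow the segment and re-align it to pointer size.
 */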
void AArch64MemLayout::SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const
{
    uint32 size;
    uint32 align;
    SetSizeAlignForTypeIdx(typeIdx, size, align);
    segment.SetSize(static_cast<uint32>(RoundUp(static_cast<uint64>(segment.GetSize()), align)));
    symbolAlloc.SetOffset(segment.GetSize());
    segment.SetSize(segment.GetSize() + size);
    segment.SetSize(static_cast<uint32>(RoundUp(static_cast<uint64>(segment.GetSize()), GetPointerSize())));
}

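/*
 * For C varargs functions, compute the sizes of the GR/VR save areas that hold the
 * unnamed-argument registers, as required by the AAPCS64 va_list model.
 */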
void AArch64MemLayout::LayoutVarargParams()
{
    uint32 nIntRegs = 0;
    uint32 nFpRegs = 0;
    AArch64CallConvImpl parmlocator(be);
    CCLocInfo ploc;
    MIRFunction *func = mirFunction;
    if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) {
        for (uint32 i = 0; i < func->GetFormalCount(); i++) {
            if (i == 0) {
                if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) {
                    TyIdx tyIdx = func->GetFuncRetStructTyIdx();
                    if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) {
                        continue;
                    }
                }
            }
            MIRType *ty = func->GetNthParamType(i);
            CHECK_FATAL(mirFunction->GetAttr(FUNCATTR_ccall), "only c calling convention support here");
            parmlocator.LocateNextParm(*ty, ploc, i == 0, func);
            if (ploc.reg0 != kRinvalid) {
                if (ploc.reg0 >= R0 && ploc.reg0 <= R7) {
                    nIntRegs++;
                } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) {
                    nFpRegs++;
                }
            }
            if (ploc.reg1 != kRinvalid) {
                if (ploc.reg1 >= R0 && ploc.reg1 <= R7) {
                    nIntRegs++;
                } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) {
                    nFpRegs++;
                }
            }
            if (ploc.reg2 != kRinvalid) {
                if (ploc.reg2 >= R0 && ploc.reg2 <= R7) {
                    nIntRegs++;
                } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) {
                    nFpRegs++;
                }
            }
            if (ploc.reg3 != kRinvalid) {
                if (ploc.reg3 >= R0 && ploc.reg3 <= R7) {
                    nIntRegs++;
                } else if (ploc.reg3 >= V0 && ploc.reg3 <= V7) {
                    nFpRegs++;
                }
            }
        }
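        /*
         * Eight GP argument registers (x0-x7) and eight FP/SIMD argument registers are available;
         * only those not consumed by named parameters need a save slot. Each FP/SIMD slot is
         * 16 bytes (pointer size times two), matching the 128-bit Q registers.
         */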
        if (CGOptions::IsArm64ilp32()) {
            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * k8ByteSize);
        } else {
            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * GetPointerSize());
        }
        if (CGOptions::UseGeneralRegOnly()) {
            SetSizeOfVRSaveArea(0);
        } else {
            if (CGOptions::IsArm64ilp32()) {
                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * k8ByteSize * k2ByteSize);
            } else {
                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * GetPointerSize() * k2ByteSize);
            }
        }
    }
}

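/*
 * Assign each formal parameter a location: register-passed formals are also given space in
 * segArgsRegPassed so they can be homed to memory, while stack-passed formals get space in
 * segArgsStkPassed.
 */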
void AArch64MemLayout::LayoutFormalParams()
{
    bool isLmbc = (be.GetMIRModule().GetFlavor() == kFlavorLmbc);
    if (isLmbc && mirFunction->GetFormalCount() == 0) {
        /*
         * lmbc : upformalsize - size of formals passed from caller's frame into current function
         *        framesize - total frame size of current function used by Maple IR
         *        outparmsize - portion of frame size of current function used by call parameters
         */
        segArgsStkPassed.SetSize(mirFunction->GetOutParmSize());
        segArgsRegPassed.SetSize(mirFunction->GetOutParmSize());
        return;
    }

    CCImpl &parmLocator = *static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreateLocator(cgFunc->GetCurCallConvKind());
    CCLocInfo ploc;
    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
        MIRSymbol *sym = mirFunction->GetFormal(i);
        uint32 stIndex = sym->GetStIndex();
        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
        SetSymAllocInfo(stIndex, *symLoc);
        if (i == 0) {
            if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) {
                symLoc->SetMemSegment(GetSegArgsRegPassed());
                symLoc->SetOffset(GetSegArgsRegPassed().GetSize());
                TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx();
                if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) {
                    if (CGOptions::IsArm64ilp32()) {
                        segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize);
                    } else {
                        segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize());
                    }
                }
                continue;
            }
        }
        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx);
        uint32 ptyIdx = ty->GetTypeIndex();
        parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction);
        if (ploc.reg0 != kRinvalid) { /* register */
            symLoc->SetRegisters(static_cast<AArch64reg>(ploc.reg0), static_cast<AArch64reg>(ploc.reg1),
                                 static_cast<AArch64reg>(ploc.reg2), static_cast<AArch64reg>(ploc.reg3));
            if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) {
                symLoc->SetMemSegment(segRefLocals);
                SetSegmentSize(*symLoc, segRefLocals, ptyIdx);
            } else if (!sym->IsPreg()) {
                uint32 size;
                uint32 align;
                SetSizeAlignForTypeIdx(ptyIdx, size, align);
                symLoc->SetMemSegment(GetSegArgsRegPassed());
                /* the type's alignment requirement may be smaller than a register's byte size */
                if (ty->GetPrimType() == PTY_agg) {
                    /* struct param aligned on 8 byte boundary unless it is small enough */
                    if (CGOptions::IsArm64ilp32()) {
                        align = k8ByteSize;
                    } else {
                        align = GetPointerSize();
                    }
                }
                uint32 tSize = 0;
                if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) ||
                    AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) {
                    align = k16ByteSize;
                }
                segArgsRegPassed.SetSize(static_cast<uint32>(RoundUp(segArgsRegPassed.GetSize(), align)));
                symLoc->SetOffset(segArgsRegPassed.GetSize());
                segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size);
            } else if (isLmbc) {
                segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize);
            }
        } else { /* stack */
            uint32 size;
            uint32 align;
            SetSizeAlignForTypeIdx(ptyIdx, size, align);
            symLoc->SetMemSegment(GetSegArgsStkPassed());
            segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), align)));
            symLoc->SetOffset(segArgsStkPassed.GetSize());
            segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size);
            /* We need it as dictated by the AArch64 ABI $5.4.2 C12 */
            if (CGOptions::IsArm64ilp32()) {
                segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize)));
            } else {
                segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize())));
            }
            if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) {
                SetLocalRegLocInfo(sym->GetStIdx(), *symLoc);
                AArch64SymbolAlloc *symLoc1 = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
                symLoc1->SetMemSegment(segRefLocals);
                SetSegmentSize(*symLoc1, segRefLocals, ptyIdx);
                SetSymAllocInfo(stIndex, *symLoc1);
            }
        }
        if (cgFunc->GetCG()->GetCGOptions().WithDwarf() && ploc.reg0 == kRinvalid) {
            cgFunc->AddDIESymbolLocation(sym, symLoc);
        }
    }
}

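/*
 * Lay out automatic (kScAuto) variables. Reference-typed locals go into segRefLocals;
 * escape-analysis temporaries and ret_ref symbols are collected for later placement.
 */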
void AArch64MemLayout::LayoutLocalVariables(std::vector<MIRSymbol *> &tempVar, std::vector<MIRSymbol *> &returnDelays)
{
    if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) {
        segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize());
        return;
    }

    uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize();
    for (uint32 i = 0; i < symTabSize; ++i) {
        MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i);
        if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) {
            continue;
        }
        uint32 stIndex = sym->GetStIndex();
        TyIdx tyIdx = sym->GetTyIdx();
        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
        SetSymAllocInfo(stIndex, *symLoc);
        CHECK_FATAL(!symLoc->IsRegister(), "expect not register");

        if (sym->IsRefType()) {
            if (mirFunction->GetRetRefSym().find(sym) != mirFunction->GetRetRefSym().end()) {
                /* try to put ret_ref at the end of segRefLocals */
                returnDelays.emplace_back(sym);
                continue;
            }
            symLoc->SetMemSegment(segRefLocals);
            segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx)));
            symLoc->SetOffset(segRefLocals.GetSize());
            segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx));
        } else {
            if (sym->GetName() == "__EARetTemp__" || sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") {
                tempVar.emplace_back(sym);
                continue;
            }
            symLoc->SetMemSegment(segLocals);
            MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
            uint32 align = be.GetTypeAlign(tyIdx);
            uint32 tSize = 0;
            if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) ||
                AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) {
                align = k16ByteSize;
            }
            if (ty->GetPrimType() == PTY_agg && align < k8BitSize) {
                segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), k8BitSize)));
            } else {
                segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), align)));
            }
            symLoc->SetOffset(segLocals.GetSize());
            segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx));
        }
        if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) {
            cgFunc->AddDIESymbolLocation(sym, symLoc);
        }
    }
}

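/* Place the escape-analysis temporaries collected above at the end of segRefLocals. */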
void AArch64MemLayout::LayoutEAVariales(std::vector<MIRSymbol *> &tempVar)
{
    for (auto sym : tempVar) {
        uint32 stIndex = sym->GetStIndex();
        TyIdx tyIdx = sym->GetTyIdx();
        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
        SetSymAllocInfo(stIndex, *symLoc);
        DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register");
        symLoc->SetMemSegment(segRefLocals);
        segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx)));
        symLoc->SetOffset(segRefLocals.GetSize());
        segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx));
    }
}

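/*
 * Place delayed ret_ref symbols at the end of segRefLocals, then finalize the size of the
 * outgoing-argument area and re-align segRefLocals and segLocals.
 */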
void AArch64MemLayout::LayoutReturnRef(std::vector<MIRSymbol *> &returnDelays, int32 &structCopySize,
                                       int32 &maxParmStackSize)
{
    for (auto sym : returnDelays) {
        uint32 stIndex = sym->GetStIndex();
        TyIdx tyIdx = sym->GetTyIdx();
        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
        SetSymAllocInfo(stIndex, *symLoc);
        DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register");

        DEBUG_ASSERT(sym->IsRefType(), "expect reftype ");
        symLoc->SetMemSegment(segRefLocals);
        segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx)));
        symLoc->SetOffset(segRefLocals.GetSize());
        segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx));
    }
    if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) {
        segArgsToStkPass.SetSize(mirFunction->GetOutParmSize() + kDivide2 * k8ByteSize);
    } else {
        segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize));
    }
    maxParmStackSize = static_cast<int32>(segArgsToStkPass.GetSize());
    if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
        AssignSpillLocationsToPseudoRegisters();
    } else {
        AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc);
        /* 8: the number of bytes a VirtualRegNode occupies */
        aarchCGFunc->SetCatchRegno(cgFunc->NewVReg(kRegTyInt, 8));
    }
    segRefLocals.SetSize(static_cast<uint32>(RoundUp(segRefLocals.GetSize(), GetPointerSize())));
    if (CGOptions::IsArm64ilp32()) {
        segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), k8ByteSize)));
    } else {
        segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), GetPointerSize())));
    }
}

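/* Create memory operands up front for register-passed formals that must be homed to the stack. */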
void AArch64MemLayout::LayoutActualParams()
{
    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
        if (i == 0) {
            if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) {
                continue;
            }
        }
        MIRSymbol *sym = mirFunction->GetFormal(i);
        if (sym->IsPreg()) {
            continue;
        }
        uint32 stIndex = sym->GetStIndex();
        AArch64SymbolAlloc *symLoc = static_cast<AArch64SymbolAlloc *>(GetSymAllocInfo(stIndex));
        if (symLoc->GetMemSegment() == &GetSegArgsRegPassed()) { /* register */
            /*
             * In O0, we store parameters passed via registers into memory,
             * so each such parameter needs storage assigned on the stack.
             * If a function parameter is never accessed in the function body
             * and we don't create its memory operand here, its offset gets
             * computed only when the prologue generates the instruction that
             * stores its value into the stack and creates its memory operand.
             * That parameter would then see a different StackFrameSize than
             * the parameters that are accessed in the body, because the size
             * of the storage for FP/LR is added to the stack frame size in
             * between.
             * To make offset assignment easier, we create a memory operand
             * for each function parameter in advance.
             * This has to be done after all formal parameters and local
             * variables have been assigned their respective storage, i.e.
             * CallFrameSize (discounting callee-saved and FP/LR) is known.
             */
            MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx);
            uint32 ptyIdx = ty->GetTypeIndex();
            static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte);
        }
    }
}

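/*
 * Top-level driver: lay out vararg save areas, formals, locals, EA temporaries and ret_ref
 * symbols, then compute the final fixed frame size.
 */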
void AArch64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize)
{
    LayoutVarargParams();
    LayoutFormalParams();
    /*
     * We do need this, as LDR/STR with an immediate requires the immediate
     * to be aligned on an 8/4-byte boundary, and local variables may need
     * 8-byte alignment.
     */
    if (CGOptions::IsArm64ilp32()) {
        segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), k8ByteSize));
        /* we do need this as SP has to be aligned at a 16-byte boundary */
        segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize + k8ByteSize));
    } else {
        segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize()));
        segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize()));
    }
    /* allocate the local variables in the stack */
    std::vector<MIRSymbol *> EATempVar;
    std::vector<MIRSymbol *> retDelays;
    LayoutLocalVariables(EATempVar, retDelays);
    LayoutEAVariales(EATempVar);

    /* handle ret_ref sym now */
    LayoutReturnRef(retDelays, structCopySize, maxParmStackSize);

    /*
     * For the actual arguments that cannot be passed through registers,
     * we need to allocate space for caller-saved registers.
     */
    LayoutActualParams();

    fixStackSize = static_cast<int32>(RealStackFrameSize());
    cgFunc->SetUseFP(cgFunc->UseFP() || fixStackSize > kMaxPimm32);
}

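/*
 * Reserve a stack slot in segLocals for every pseudo register; for Java code, an additional
 * slot is reserved for "thrownval".
 */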
void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters()
{
    MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab();

    /* BUG: n_regs include index 0 which is not a valid preg index. */
    size_t nRegs = pregTab->Size();
    spillLocTable.resize(nRegs);
    for (size_t i = 1; i < nRegs; ++i) {
        PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType();
        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
        symLoc->SetMemSegment(segLocals);
        segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType)));
        symLoc->SetOffset(segLocals.GetSize());
        MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType];
        segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex()));
        spillLocTable[i] = symLoc;
    }

    if (!cgFunc->GetMirModule().IsJavaModule()) {
        return;
    }

    /*
     * Allocate additional stack space for "thrownval".
     * segLocals needs 8-byte alignment here.
     */
    if (CGOptions::IsArm64ilp32()) {
        segLocals.SetSize(RoundUp(segLocals.GetSize(), k8ByteSize));
    } else {
        segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPointerSize()));
    }
    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc);
    RegOperand &baseOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand();
    int32 offset = static_cast<int32>(segLocals.GetSize());

    OfstOperand *offsetOpnd = &aarchCGFunc->CreateOfstOpnd(offset + k16BitSize, k64BitSize);
    MemOperand *throwMem = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, baseOpnd,
                                                         static_cast<RegOperand *>(nullptr), offsetOpnd, nullptr);
    aarchCGFunc->SetCatchOpnd(*throwMem);
    if (CGOptions::IsArm64ilp32()) {
        segLocals.SetSize(segLocals.GetSize() + k8ByteSize);
    } else {
        segLocals.SetSize(segLocals.GetSize() + GetPointerSize());
    }
}

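/*
 * Total frame size before 16-byte rounding: register-passed-arg home area, callee-saved registers,
 * ref locals, locals, spill area, the reserved slot, the vararg save areas (non-lmbc), and the
 * outgoing-argument area.
 */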
uint64 AArch64MemLayout::StackFrameSize() const
{
    uint64 total = segArgsRegPassed.GetSize() + static_cast<AArch64CGFunc *>(cgFunc)->SizeOfCalleeSaved() +
                   GetSizeOfRefLocals() + locals().GetSize() + GetSizeOfSpillReg() +
                   cgFunc->GetFunction().GetFrameReseverdSlot();

    if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
        if (GetSizeOfGRSaveArea() > 0) {
            total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment);
        }
        if (GetSizeOfVRSaveArea() > 0) {
            total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment);
        }
    }

    /*
     * If the function has neither VLA nor alloca, we allocate space for the
     * stack-passed arguments of calls in the call frame; otherwise, that space
     * has to be allocated for each call and reclaimed afterward.
     */
    total += segArgsToStkPass.GetSize();
    return RoundUp(total, kAarch64StackPtrAlignment);
}

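/* StackFrameSize plus one extra 16-byte slot when the stack protector is enabled. */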
uint32 AArch64MemLayout::RealStackFrameSize() const
{
    auto size = StackFrameSize();
    if (cgFunc->GetCG()->IsStackProtectorStrong() || cgFunc->GetCG()->IsStackProtectorAll()) {
        size += static_cast<uint32>(kAarch64StackPtrAlignment);
    }
    return static_cast<uint32>(size);
}

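/*
 * Base offset of the ref-locals area: just past the locals, plus the size of the FP/LR pair
 * unless the call frame was allocated with a single stp/sub pair.
 */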
int32 AArch64MemLayout::GetRefLocBaseLoc() const
{
    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc);
    auto beforeSize = GetSizeOfLocals();
    if (aarchCGFunc->UsedStpSubPairForCallFrameAllocation()) {
        return static_cast<int32>(beforeSize);
    }
    return static_cast<int32>(beforeSize + kSizeOfFplr);
}

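/*
 * Offset of the GR save area: the frame size minus the rounded-up save area, less the
 * outgoing-argument area and the reserved slot.
 */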
int32 AArch64MemLayout::GetGRSaveAreaBaseLoc()
{
    int32 total = static_cast<int32>(RealStackFrameSize() - RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment));
    total -= static_cast<int32>(SizeOfArgsToStackPass()) + cgFunc->GetFunction().GetFrameReseverdSlot();
    return total;
}

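/* The VR save area sits immediately below the GR save area. */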
int32 AArch64MemLayout::GetVRSaveAreaBaseLoc()
{
    int32 total =
        static_cast<int32>((RealStackFrameSize() - RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)) -
                           RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment));
    total -= static_cast<int32>(SizeOfArgsToStackPass()) + cgFunc->GetFunction().GetFrameReseverdSlot();
    return total;
}
} /* namespace maplebe */