/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_cgfunc.h"
#include "becommon.h"
#include "aarch64_call_conv.h"

namespace maplebe {
using namespace maple;

/* external interface to look for pure float struct */
uint32 AArch64CallConvImpl::FloatParamRegRequired(const MIRStructType &structType, uint32 &fpSize)
{
    PrimType baseType = PTY_begin;
    size_t elemNum = 0;
    if (!IsHomogeneousAggregates(structType, baseType, elemNum)) {
        return 0;
    }
    fpSize = GetPrimTypeSize(baseType);
    return static_cast<uint32>(elemNum);
}
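
// Example (assuming IsHomogeneousAggregates accepts a pure-float struct as an
// HFA): for a C struct such as
//     struct Vec3 { float x, y, z; };
// baseType is PTY_f32 and elemNum is 3, so fpSize is set to 4 and the function
// returns 3, i.e. three consecutive SIMD/FP registers are needed to pass or
// return the struct.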

static void AllocateHomogeneousAggregatesRegister(CCLocInfo &pLoc, std::vector<AArch64reg> &regList, uint32 maxRegNum,
                                                  PrimType baseType, uint32 allocNum, [[maybe_unused]] uint32 begin = 0)
{
    CHECK_FATAL(allocNum + begin > 1 && static_cast<uint64>(allocNum) + begin < UINT64_MAX, "value overflow");
    CHECK_FATAL(allocNum + begin - 1 < maxRegNum, "NIY, out of range.");
    if (allocNum >= kOneRegister) {
        pLoc.reg0 = regList[begin++];
        pLoc.primTypeOfReg0 = baseType;
    }
    if (allocNum >= kTwoRegister) {
        pLoc.reg1 = regList[begin++];
        pLoc.primTypeOfReg1 = baseType;
    }
    if (allocNum >= kThreeRegister) {
        pLoc.reg2 = regList[begin++];
        pLoc.primTypeOfReg2 = baseType;
    }
    if (allocNum >= kFourRegister) {
        pLoc.reg3 = regList[begin];
        pLoc.primTypeOfReg3 = baseType;
    }
    pLoc.regCount = static_cast<uint8>(allocNum);
}
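
// Usage sketch (values assumed for illustration): allocating a two-member f64
// HFA starting at the next free FP register, e.g.
//     AllocateHomogeneousAggregatesRegister(pLoc, AArch64Abi::floatReturnRegs,
//                                           AArch64Abi::kNumFloatParmRegs, PTY_f64, 2, nsrn);
// fills reg0/reg1 with v[nsrn]/v[nsrn+1] and sets regCount = 2, mirroring the
// call sites in LocateRetVal and AllocateRegisterForAgg below.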

// Instantiated with the type of the function return value; describes how
// the return value is to be passed back to the caller.
// Refer to Procedure Call Standard for the Arm 64-bit
// Architecture (AArch64) 2022Q3, $6.9:
//  "If the type, T, of the result of a function is such that
//     void func(T arg)
//   would require that arg be passed as a value in a register (or set of registers)
//   according to the rules in Parameter passing, then the result is returned in the
//   same registers as would be used for such an argument."
void AArch64CallConvImpl::LocateRetVal(const MIRType &retType, CCLocInfo &pLoc)
{
    InitCCLocInfo(pLoc);
    size_t retSize = retType.GetSize();
    if (retSize == 0) {
        return;  // size 0 ret val
    }

    PrimType primType = retType.GetPrimType();
    if (IsPrimitiveFloat(primType) || IsPrimitiveVector(primType)) {
        // float or vector, return in v0
        pLoc.reg0 = AArch64Abi::floatReturnRegs[0];
        pLoc.primTypeOfReg0 = primType;
        pLoc.regCount = 1;
        return;
    }
    if (IsPrimitiveInteger(primType) && GetPrimTypeBitSize(primType) <= k64BitSize) {
        // integer with size <= 64 bits, return in x0
        pLoc.reg0 = AArch64Abi::intReturnRegs[0];
        pLoc.primTypeOfReg0 = primType;
        pLoc.regCount = 1;
        return;
    }
    PrimType baseType = PTY_begin;
    size_t elemNum = 0;
    if (IsHomogeneousAggregates(retType, baseType, elemNum)) {
        // homogeneous aggregates, return in v0-v3
        AllocateHomogeneousAggregatesRegister(pLoc, AArch64Abi::floatReturnRegs, AArch64Abi::kNumFloatParmRegs,
                                              baseType, static_cast<uint32>(elemNum));
        return;
    }
    if (retSize <= k16ByteSize) {
        // aggregate size <= 16 bytes or int128, return in x0-x1
        pLoc.reg0 = AArch64Abi::intReturnRegs[0];
        pLoc.primTypeOfReg0 = (retSize <= k4ByteSize && !CGOptions::IsBigEndian()) ? PTY_u32 : PTY_u64;
        if (retSize > k8ByteSize) {
            pLoc.reg1 = AArch64Abi::intReturnRegs[1];
            pLoc.primTypeOfReg1 = (retSize <= k12ByteSize && !CGOptions::IsBigEndian()) ? PTY_u32 : PTY_u64;
        }
        pLoc.regCount = retSize <= k8ByteSize ? kOneRegister : kTwoRegister;
        return;
    }
}
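
// Worked example of the final rule: a 12-byte struct { int a, b, c; } is not
// an HFA, so it is returned in x0/x1 with reg0 typed PTY_u64 (size > 4 bytes)
// and, on little-endian, reg1 typed PTY_u32 (size <= 12 bytes); regCount is 2.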

uint64 AArch64CallConvImpl::AllocateRegisterForAgg(const MIRType &mirType, CCLocInfo &pLoc, uint64 size, uint64 &align)
{
    uint64 aggCopySize = 0;
    PrimType baseType = PTY_begin;
    size_t elemNum = 0;
    if (IsHomogeneousAggregates(mirType, baseType, elemNum)) {
        align = GetPrimTypeSize(baseType);
        DEBUG_ASSERT(nextFloatRegNO + elemNum >= 1, "nextFloatRegNO + elemNum - 1 should be unsigned");
        if ((nextFloatRegNO + elemNum - 1) < AArch64Abi::kNumFloatParmRegs) {
            // C.2  If the argument is an HFA or an HVA and there are sufficient unallocated SIMD and
            //      Floating-point registers (NSRN + number of members <= 8), then the argument is
            //      allocated to SIMD and Floating-point registers (with one register per member of
            //      the HFA or HVA). The NSRN is incremented by the number of registers used.
            //      The argument has now been allocated.
            AllocateHomogeneousAggregatesRegister(pLoc, AArch64Abi::floatReturnRegs, AArch64Abi::kNumFloatParmRegs,
                                                  baseType, static_cast<uint32>(elemNum), nextFloatRegNO);
            nextFloatRegNO = static_cast<uint32>(nextFloatRegNO + elemNum);
        } else {
            // C.3  If the argument is an HFA or an HVA then the NSRN is set to 8 and the size of the
            //      argument is rounded up to the nearest multiple of 8 bytes.
            nextFloatRegNO = AArch64Abi::kNumFloatParmRegs;
            pLoc.reg0 = kRinvalid;
        }
    } else if (size <= k16ByteSize) {
        // small struct, passed in general-purpose registers
        // B.6 If the argument is an alignment adjusted type its value is passed as a copy of the actual
        //     value. The copy will have an alignment defined as follows:
        //     (1) For a Fundamental Data Type, the alignment is the natural alignment of that type,
        //         after any promotions.
        //     (2) For a Composite Type, the alignment of the copy will have 8-byte alignment if its
        //         natural alignment is <= 8 and 16-byte alignment if its natural alignment is >= 16.
        //     The alignment of the copy is used for applying marshaling rules.
        if (mirType.GetUnadjustedAlign() <= k8ByteSize) {
            align = k8ByteSize;
        } else {
            align = k16ByteSize;
        }
        AllocateGPRegister(mirType, pLoc, size, align);
    } else {
        // large struct, a pointer to the copy is used
        pLoc.reg0 = AllocateGPRegister();
        pLoc.primTypeOfReg0 = PTY_a64;
        pLoc.memSize = k8ByteSizeInt;
        aggCopySize = RoundUp(size, k8ByteSize);
    }
    return aggCopySize;
}
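
// Example of the C.3 fallback above: with NSRN (nextFloatRegNO) at 7, a
// two-member f64 HFA would need v7-v8, which is out of range, so reg0 becomes
// kRinvalid, NSRN is pinned to 8, and LocateNextParm then routes the argument
// to the stack.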

// allocate general purpose register
void AArch64CallConvImpl::AllocateGPRegister(const MIRType &mirType, CCLocInfo &pLoc, uint64 size, uint64 align)
{
    if (IsPrimitiveInteger(mirType.GetPrimType()) && size <= k8ByteSize) {
        // C.9  If the argument is an Integral or Pointer Type, the size of the argument is less
        //      than or equal to 8 bytes and the NGRN is less than 8, the argument is copied to
        //      the least significant bits in x[NGRN]. The NGRN is incremented by one.
        //      The argument has now been allocated.
        pLoc.reg0 = AllocateGPRegister();
        pLoc.primTypeOfReg0 = mirType.GetPrimType();
        return;
    }
    if (align == k16ByteSize) {
        // C.10  If the argument has an alignment of 16 then the NGRN is rounded up to the next
        //       even number.
        nextGeneralRegNO = (nextGeneralRegNO + 1U) & ~1U;
    }
    if (mirType.GetPrimType() == PTY_i128 || mirType.GetPrimType() == PTY_u128) {
        // C.11  If the argument is an Integral Type, the size of the argument is equal to 16
        //       and the NGRN is less than 7, the argument is copied to x[NGRN] and x[NGRN+1].
        //       x[NGRN] shall contain the lower addressed double-word of the memory
        //       representation of the argument. The NGRN is incremented by two.
        //       The argument has now been allocated.
        if (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs - 1) {
            DEBUG_ASSERT(size == k16ByteSize, "NIY, size must be 16-byte.");
            pLoc.reg0 = AllocateGPRegister();
            pLoc.primTypeOfReg0 = PTY_u64;
            pLoc.reg1 = AllocateGPRegister();
            pLoc.primTypeOfReg1 = PTY_u64;
            return;
        }
    } else if (size <= k16ByteSize) {
        // C.12  If the argument is a Composite Type and the size in double-words of the argument
        //       is not more than 8 minus NGRN, then the argument is copied into consecutive
        //       general-purpose registers, starting at x[NGRN]. The argument is passed as though
        //       it had been loaded into the registers from a double-word-aligned address with
        //       an appropriate sequence of LDR instructions loading consecutive registers from
        //       memory (the contents of any unused parts of the registers are unspecified by this
        //       standard). The NGRN is incremented by the number of registers used.
        //       The argument has now been allocated.
        DEBUG_ASSERT(mirType.GetPrimType() == PTY_agg, "NIY, primType must be PTY_agg.");
        auto regNum = (size <= k8ByteSize) ? kOneRegister : kTwoRegister;
        DEBUG_ASSERT(nextGeneralRegNO + regNum >= 1, "nextGeneralRegNO + regNum - 1 should be unsigned");
        if (nextGeneralRegNO + regNum - 1 < AArch64Abi::kNumIntParmRegs) {
            pLoc.reg0 = AllocateGPRegister();
            pLoc.primTypeOfReg0 = (size <= k4ByteSize && !CGOptions::IsBigEndian()) ? PTY_u32 : PTY_u64;
            if (regNum == kTwoRegister) {
                pLoc.reg1 = AllocateGPRegister();
                pLoc.primTypeOfReg1 = (size <= k12ByteSize && !CGOptions::IsBigEndian()) ? PTY_u32 : PTY_u64;
            }
            return;
        }
    }

    // C.13  The NGRN is set to 8.
    pLoc.reg0 = kRinvalid;
    nextGeneralRegNO = AArch64Abi::kNumIntParmRegs;
}
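
// Example of rule C.10: with NGRN (nextGeneralRegNO) = 1, a 16-byte-aligned
// composite triggers the rounding (1 + 1) & ~1 = 2, so the argument starts at
// the even-numbered register x2 and x1 stays unused for this argument.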

static void SetupCCLocInfoRegCount(CCLocInfo &pLoc)
{
    if (pLoc.reg0 == kRinvalid) {
        return;
    }
    pLoc.regCount = kOneRegister;
    if (pLoc.reg1 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
    if (pLoc.reg2 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
    if (pLoc.reg3 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
}

/*
 * Refer to ARM IHI 0055C_beta: Procedure Call Standard for
 * the ARM 64-bit Architecture. $5.4.2
 *
 * For internal-only functions, we may want to implement
 * our own rules, as Apple iOS has done. Maybe we want to
 * generate two versions of each externally visible function,
 * one conforming to the ARM standard ABI and the other for
 * internal use only.
 *
 * LocateNextParm should be called once per parameter, in sequence, for each
 * parameter in the parameter list starting from the beginning; it returns
 * in pLoc the information on how each parameter is passed.
 *
 * *** CAUTION OF USE: ***
 * If LocateNextParm is called for function formals, the third argument, isFirst,
 * is true, and LocateNextParm is checked against the function's parameter list.
 * All other calls of LocateNextParm, which are against a caller's argument list,
 * must not have isFirst set, or it will be checking the caller's enclosing
 * function instead.
 */

uint64 AArch64CallConvImpl::LocateNextParm(const MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFuncType *tFunc)
{
    InitCCLocInfo(pLoc);
    uint64 typeSize = mirType.GetSize();
    if (typeSize == 0) {
        return 0;
    }

    if (isFirst) {
        DEBUG_ASSERT(beCommon.GetMIRModule().CurFunction() != nullptr, "curFunction should not be nullptr");
        auto *func = (tFunc != nullptr) ? tFunc : beCommon.GetMIRModule().CurFunction()->GetMIRFuncType();
        if (func->FirstArgReturn()) {
            // For a struct returned in memory, the pointer is passed in x8.
            SetupToReturnThroughMemory(pLoc);
            return GetPointerSize();
        }
    }

    uint64 typeAlign = mirType.GetAlign();

    pLoc.memSize = static_cast<int32>(typeSize);

    uint64 aggCopySize = 0;
    if (IsPrimitiveFloat(mirType.GetPrimType()) || IsPrimitiveVector(mirType.GetPrimType())) {
        // float or vector, passed in a floating-point or SIMD register
        pLoc.reg0 = AllocateSIMDFPRegister();
        pLoc.primTypeOfReg0 = mirType.GetPrimType();
    } else if (IsPrimitiveInteger(mirType.GetPrimType())) {
        // integer, passed in a general-purpose register
        AllocateGPRegister(mirType, pLoc, typeSize, typeAlign);
    } else {
        CHECK_FATAL(mirType.GetPrimType() == PTY_agg, "NIY");
        aggCopySize = AllocateRegisterForAgg(mirType, pLoc, typeSize, typeAlign);
    }

    SetupCCLocInfoRegCount(pLoc);
    if (pLoc.reg0 == kRinvalid) {
        // being passed in memory
        typeAlign = (typeAlign <= k8ByteSize) ? k8ByteSize : typeAlign;
        nextStackArgAdress = static_cast<int32>(RoundUp(nextStackArgAdress, typeAlign));
        pLoc.memOffset = static_cast<int32>(nextStackArgAdress);
        // large struct, passed with pointer
        nextStackArgAdress += static_cast<int32>(aggCopySize != 0 ? k8ByteSize : typeSize);
    }
    return aggCopySize;
}
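
// Usage sketch (hypothetical driver; the constructor call and the paramTypes
// list are illustrative, not defined in this file): callers walk the parameter
// list in order, one call per parameter:
//     AArch64CallConvImpl cc(beCommon);
//     CCLocInfo ploc;
//     bool first = true;
//     for (MIRType *paramTy : paramTypes) {
//         cc.LocateNextParm(*paramTy, ploc, first, funcType);
//         first = false;
//         // ploc.reg0..reg3 or ploc.memOffset now describe this parameter
//     }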

void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const
{
    DEBUG_ASSERT(pLoc.reg1 == kRinvalid, "make sure reg1 equals kRinvalid");
    PrimType pType = retTy2.GetPrimType();
    switch (pType) {
        case PTY_void:
            break;
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
        case PTY_ptr:
        case PTY_ref:
        case PTY_a64:
        case PTY_u64:
        case PTY_i64:
            pLoc.reg1 = AArch64Abi::intReturnRegs[1];
            pLoc.primTypeOfReg1 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */
            break;
        default:
            CHECK_FATAL(false, "NYI");
    }
}

uint64 AArch64WebKitJSCC::LocateNextParm(const MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFuncType *tFunc)
{
    std::vector<ArgumentClass> classes {};
    int32 alignedTySize = ClassificationArg(beCommon, mirType, classes);
    pLoc.memSize = alignedTySize;
    if (classes[0] == kIntegerClass) {
        if (alignedTySize == k4ByteSize || alignedTySize == k8ByteSize) {
            pLoc.reg0 = AllocateGPParmRegister();
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    } else if (classes[0] == kFloatClass) {
        CHECK_FATAL(false, "float should be passed on the stack!");
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        /* being passed in memory */
        pLoc.memOffset = nextStackArgAdress;
        nextStackArgAdress = pLoc.memOffset + alignedTySize;
    }
    return 0;
}
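
// Note: in this WebKit JS calling convention only 4- and 8-byte integer
// arguments may use GP registers; ClassificationArg below classifies f32/f64
// as kMemoryClass, so floating-point arguments always go to the stack.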

void AArch64WebKitJSCC::LocateRetVal(const MIRType &retType, CCLocInfo &pLoc)
{
    InitCCLocInfo(pLoc);
    std::vector<ArgumentClass> classes {}; /* Max of four Regs. */
    int32 alignedTySize = ClassificationRet(beCommon, retType, classes);
    if (alignedTySize == 0) {
        return; /* size 0 ret val */
    }
    if (classes[0] == kIntegerClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateGPRetRegister();
            pLoc.regCount += 1;
            pLoc.primTypeOfReg0 = alignedTySize == k4ByteSize ? PTY_i32 : PTY_i64;
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    } else if (classes[0] == kFloatClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateSIMDFPRetRegister();
            pLoc.regCount += 1;
            pLoc.primTypeOfReg0 = alignedTySize == k4ByteSize ? PTY_f32 : PTY_f64;
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        CHECK_FATAL(false, "should not happen");
    }
}

int32 AArch64WebKitJSCC::ClassificationRet(const BECommon &be, const MIRType &mirType,
                                           std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_u32:
        case PTY_i32:
            classes.push_back(kIntegerClass);
            return k4ByteSize;
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kFloatClass);
            return k4ByteSize;
        case PTY_f64:
            classes.push_back(kFloatClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
}

int32 AArch64WebKitJSCC::ClassificationArg(const BECommon &be, const MIRType &mirType,
                                           std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_void:
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
            classes.push_back(kIntegerClass);
            return k4ByteSize;
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kMemoryClass);
            return k4ByteSize;
        case PTY_f64:
            classes.push_back(kMemoryClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}

void AArch64WebKitJSCC::InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc)
{
    // simply delegates to LocateRetVal; unclear why a separate entry point exists
    LocateRetVal(retTy, pLoc);
}

void AArch64WebKitJSCC::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const
{
    // already handled in LocateRetVal
}

uint64 GHCCC::LocateNextParm(const MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFuncType *tFunc)
{
    std::vector<ArgumentClass> classes {};
    int32 alignedTySize = ClassificationArg(beCommon, mirType, classes);
    pLoc.memSize = alignedTySize;
    if (classes[0] == kIntegerClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateGPParmRegister();
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    } else if (classes[0] == kFloatClass) {
        if (alignedTySize == k4ByteSize) {
            pLoc.reg0 = AllocateSIMDFPParmRegisterF32();
        } else if (alignedTySize == k8ByteSize) {
            pLoc.reg0 = AllocateSIMDFPParmRegisterF64();
        } else if (alignedTySize == k16ByteSize) {
            pLoc.reg0 = AllocateSIMDFPParmRegisterF128();
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        /* would be passed in memory */
        CHECK_FATAL(false, "GHC does not support passing arguments on the stack");
    }
    return 0;
}
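
// Illustrative mapping: an f64 argument classifies as kFloatClass/8 bytes and
// takes an F64 SIMD parameter register, while a v4f32 vector classifies as
// kFloatClass/16 bytes and takes an F128 register; because GHC-style calls
// never spill to the stack, exhausting a register class is fatal by design.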

void GHCCC::LocateRetVal(const MIRType &retType, CCLocInfo &pLoc)
{
    CHECK_FATAL(false, "GHC does not return");
}

int32 GHCCC::ClassificationArg(const BECommon &be, const MIRType &mirType, std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_void:
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kFloatClass);
            return k4ByteSize;
        case PTY_f64:
        case PTY_v2i32:
        case PTY_v4i16:
        case PTY_v8i8:
        case PTY_v2f32:
            classes.push_back(kFloatClass);
            return k8ByteSize;
        case PTY_v2i64:
        case PTY_v4i32:
        case PTY_v8i16:
        case PTY_v16i8:
        case PTY_v4f32:
        case PTY_f128:
            classes.push_back(kFloatClass);
            return k16ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}

void GHCCC::InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc)
{
    // simply delegates to LocateRetVal; unclear why a separate entry point exists
    LocateRetVal(retTy, pLoc);
}

void GHCCC::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const
{
    // GHC-style functions never return values, so a second return register is meaningless
    CHECK_FATAL(false, "GHC does not return");
}
/*
 * From "ARM Procedure Call Standard for ARM 64-bit Architecture"
 *     ARM IHI 0055C_beta, 6th November 2013
 * $ 5.1 Machine Registers
 * $ 5.1.1 General-Purpose Registers
 *  <Table 2>                Note
 *  SP       Stack Pointer
 *  R30/LR   Link register   Stores the return address.
 *                           We push it onto the stack along with FP on function
 *                           entry using STP and restore it on function exit
 *                           using LDP, even if the function is a leaf (i.e.,
 *                           it does not call any other function), because it
 *                           is free (we have to store FP anyway). So, if a
 *                           function is a leaf, we may use it as a temporary
 *                           register.
 *  R29/FP   Frame Pointer
 *  R19-R28  Callee-saved
 *           registers
 *  R18      Platform reg    Can we use it as a temporary register?
 *  R16,R17  IP0,IP1         May be used as temporary registers. Should be
 *                           given lower priority (i.e., we push them
 *                           onto the free-register stack before the others).
 *  R9-R15                   Temporary registers, caller-saved
 *  Note:
 *  R16 and R17 may be used by a linker as a scratch register between
 *  a routine and any subroutine it calls. They can also be used within a
 *  routine to hold intermediate values between subroutine calls.
 *
 *  The role of R18 is platform specific. If a platform ABI has need of
 *  a dedicated general-purpose register to carry inter-procedural state
 *  (for example, the thread context) then it should use this register for
 *  that purpose. If the platform ABI has no such requirements, then it should
 *  use R18 as an additional temporary register. The platform ABI specification
 *  must document the usage for this register.
 *
 *  A subroutine invocation must preserve the contents of the registers R19-R29
 *  and SP. All 64 bits of each value stored in R19-R29 must be preserved, even
 *  when using the ILP32 data model.
 *
 *  $ 5.1.2 SIMD and Floating-Point Registers
 *
 *  The first eight registers, V0-V7, are used to pass argument values into
 *  a subroutine and to return result values from a function. They may also
 *  be used to hold intermediate values within a routine.
 *
 *  V8-V15 must be preserved by a callee across subroutine calls; the
 *  remaining registers do not need to be preserved (i.e., they are
 *  caller-saved). Additionally, only the bottom 64 bits of each value stored
 *  in V8-V15 need to be preserved.
 */
} /* namespace maplebe */