/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aarch64_cgfunc.h"
#include "aarch64_call_conv.h"

namespace maplebe {
using namespace maple;

// Instantiated with the type of the function return value; it describes how
// the return value is passed back to the caller.
// Refer to Procedure Call Standard for the Arm 64-bit
// Architecture (AArch64) 2022Q3, $6.9:
//  "If the type, T, of the result of a function is such that
//     void func(T arg)
//   would require that arg be passed as a value in a register (or set of registers)
//   according to the rules in Parameter passing, then the result is returned in the
//   same registers as would be used for such an argument."
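// For example (illustrative, matching the cases handled below): a function
// returning a 32-bit integer gets its result back in x0 (w0), and a function
// returning a float or double gets it back in v0.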
void AArch64CallConvImpl::LocateRetVal(const MIRType &retType, CCLocInfo &pLoc)
{
    InitCCLocInfo(pLoc);
    size_t retSize = retType.GetSize();
    if (retSize == 0) {
        return;  // size 0 ret val
    }

    PrimType primType = retType.GetPrimType();
    if (IsPrimitiveFloat(primType)) {
        // float or vector, return in v0
        pLoc.reg0 = AArch64Abi::floatReturnRegs[0];
        pLoc.primTypeOfReg0 = primType;
        pLoc.regCount = 1;
        return;
    }
    if (IsPrimitiveInteger(primType) && GetPrimTypeBitSize(primType) <= k64BitSize) {
        // integer and size <= 64 bits, return in x0
        pLoc.reg0 = AArch64Abi::intReturnRegs[0];
        pLoc.primTypeOfReg0 = primType;
        pLoc.regCount = 1;
        return;
    }
    CHECK_FATAL(false, "NIY");
}

// Allocate a general purpose register for the argument.
void AArch64CallConvImpl::AllocateGPRegister(const MIRType &mirType, CCLocInfo &pLoc, uint64 size, uint64 align)
{
    if (IsPrimitiveInteger(mirType.GetPrimType()) && size <= k8ByteSize) {
        // C.9  If the argument is an Integral or Pointer Type, the size of the argument is less
        //      than or equal to 8 bytes and the NGRN is less than 8, the argument is copied to
        //      the least significant bits in x[NGRN]. The NGRN is incremented by one.
        //      The argument has now been allocated.
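        // For example (illustrative): with NGRN == 0 an 8-byte integer argument is
        // copied into x0 and NGRN becomes 1; the next such argument then goes to x1.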
        pLoc.reg0 = AllocateGPRegister();
        pLoc.primTypeOfReg0 = mirType.GetPrimType();
        return;
    }
    CHECK_FATAL(false, "NIY");
}

static void SetupCCLocInfoRegCount(CCLocInfo &pLoc)
{
    if (pLoc.reg0 == kRinvalid) {
        return;
    }
    pLoc.regCount = kOneRegister;
    if (pLoc.reg1 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
    if (pLoc.reg2 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
    if (pLoc.reg3 == kRinvalid) {
        return;
    }
    pLoc.regCount++;
}

/*
 * Refer to ARM IHI 0055C_beta: Procedure Call Standard for
 * the ARM 64-bit Architecture. $5.4.2
 *
 * For internal-only functions, we may want to implement
 * our own rules as Apple iOS has done. Maybe we want to
 * generate two versions of each externally visible function,
 * one conforming to the ARM standard ABI, and the other for
 * internal use only.
 *
 * LocateNextParm should be called once per parameter, in order, starting
 * from the beginning of the parameter list; it returns in pLoc the
 * information on how each parameter is passed.
 *
 * *** CAUTION OF USE: ***
 * If LocateNextParm is called for a function's formals, the third argument
 * isFirst must be true; LocateNextParm is then checked against that function's
 * parameter list. All other calls of LocateNextParm, which are against a
 * caller's argument list, must not have isFirst set, or it will be checking
 * the caller's enclosing function.
 */
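
// Usage sketch (illustrative only; the construction and variable names are
// assumptions, not taken from this file):
//
//   AArch64CallConvImpl callConv(beCommon);
//   CCLocInfo ploc;
//   for (size_t i = 0; i < paramTypes.size(); ++i) {
//       callConv.LocateNextParm(*paramTypes[i], ploc, i == 0, funcType);
//       // ploc now describes parameter i: a register if ploc.reg0 != kRinvalid,
//       // otherwise a stack slot at ploc.memOffset.
//   }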

uint64 AArch64CallConvImpl::LocateNextParm(const MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFuncType *tFunc)
{
    InitCCLocInfo(pLoc);
    uint64 typeSize = mirType.GetSize();
    if (typeSize == 0) {
        return 0;
    }

    if (isFirst) {
        DEBUG_ASSERT(beCommon.GetMIRModule().CurFunction() != nullptr, "curFunction should not be nullptr");
        auto *func = (tFunc != nullptr) ? tFunc : beCommon.GetMIRModule().CurFunction()->GetMIRFuncType();
        if (func->FirstArgReturn()) {
            // For a struct returned in memory, the pointer to it is passed in x8.
            SetupToReturnThroughMemory(pLoc);
            return GetPointerSize();
        }
    }

    uint64 typeAlign = mirType.GetAlign();

    pLoc.memSize = static_cast<int32>(typeSize);

    uint64 aggCopySize = 0;
    if (IsPrimitiveFloat(mirType.GetPrimType())) {
        // float or vector, passed in a floating-point/SIMD register
        pLoc.reg0 = AllocateSIMDFPRegister();
        pLoc.primTypeOfReg0 = mirType.GetPrimType();
    } else if (IsPrimitiveInteger(mirType.GetPrimType())) {
        // integer, passed in a general purpose register
        AllocateGPRegister(mirType, pLoc, typeSize, typeAlign);
    } else {
        CHECK_FATAL(false, "NIY");
    }

    SetupCCLocInfoRegCount(pLoc);
    if (pLoc.reg0 == kRinvalid) {
        // being passed in memory
        typeAlign = (typeAlign <= k8ByteSize) ? k8ByteSize : typeAlign;
        nextStackArgAdress = static_cast<int32>(RoundUp(nextStackArgAdress, typeAlign));
        pLoc.memOffset = static_cast<int32>(nextStackArgAdress);
        // a large aggregate is passed by pointer, so it only takes one 8-byte slot
        nextStackArgAdress += static_cast<int32>(aggCopySize != 0 ? k8ByteSize : typeSize);
    }
    return aggCopySize;
}

uint64 AArch64WebKitJSCC::LocateNextParm(const MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFuncType *tFunc)
{
    std::vector<ArgumentClass> classes {};
    int32 alignedTySize = ClassificationArg(beCommon, mirType, classes);
    pLoc.memSize = alignedTySize;
    if (classes[0] == kIntegerClass) {
        if (alignedTySize == k4ByteSize || alignedTySize == k8ByteSize) {
            pLoc.reg0 = AllocateGPParmRegister();
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    } else if (classes[0] == kFloatClass) {
        CHECK_FATAL(false, "floats should be passed on the stack!");
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        /* being passed in memory */
        pLoc.memOffset = nextStackArgAdress;
        nextStackArgAdress = pLoc.memOffset + alignedTySize;
    }
    return 0;
}

void AArch64WebKitJSCC::LocateRetVal(const MIRType &retType, CCLocInfo &pLoc)
{
    InitCCLocInfo(pLoc);
    std::vector<ArgumentClass> classes {}; /* Max of four Regs. */
    int32 alignedTySize = ClassificationRet(beCommon, retType, classes);
    if (alignedTySize == 0) {
        return; /* size 0 ret val */
    }
    if (classes[0] == kIntegerClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateGPRetRegister();
            pLoc.regCount += 1;
            pLoc.primTypeOfReg0 = alignedTySize == k4ByteSize ? PTY_i32 : PTY_i64;
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    } else if (classes[0] == kFloatClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateSIMDFPRetRegister();
            pLoc.regCount += 1;
            pLoc.primTypeOfReg0 = alignedTySize == k4ByteSize ? PTY_f32 : PTY_f64;
        } else {
            CHECK_FATAL(false, "should not go here");
        }
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        CHECK_FATAL(false, "should not happen");
    }
    return;
}

int32 AArch64WebKitJSCC::ClassificationRet(const BECommon &be, const MIRType &mirType,
                                           std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Return values of types (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_u32:
        case PTY_i32:
            classes.push_back(kIntegerClass);
            return k4ByteSize;
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kFloatClass);
            return k4ByteSize;
        case PTY_f64:
            classes.push_back(kFloatClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
}

int32 AArch64WebKitJSCC::ClassificationArg(const BECommon &be, const MIRType &mirType,
                                           std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_void:
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_u32:
        case PTY_i32:
            classes.push_back(kIntegerClass);
            return k4ByteSize;
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kMemoryClass);
            return k4ByteSize;
        case PTY_f64:
            classes.push_back(kMemoryClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}
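
// For example (illustrative, following the classification above): under this
// WebKit JS calling convention a PTY_i64 argument is classed kIntegerClass and
// is passed in a general purpose register, while a PTY_f64 argument is classed
// kMemoryClass and is therefore passed on the stack.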

void AArch64WebKitJSCC::InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc)
{
    // don't see why this function exists; it just forwards to LocateRetVal
    LocateRetVal(retTy, pLoc);
}
/*
 * From "ARM Procedure Call Standard for ARM 64-bit Architecture"
 *     ARM IHI 0055C_beta, 6th November 2013
 * $ 5.1 Machine Registers
 * $ 5.1.1 General-Purpose Registers
 *  <Table 2>                Note
 *  SP       Stack Pointer
 *  R30/LR   Link register   Stores the return address.
 *                           We push it onto the stack along with FP on function
 *                           entry using STP and restore it on function exit
 *                           using LDP even if the function is a leaf (i.e.,
 *                           it does not call any other function), because it
 *                           is free (we have to store FP anyway).  So, if a
 *                           function is a leaf, we may use it as a temporary
 *                           register.
 *  R29/FP   Frame Pointer
 *  R19-R28  Callee-saved
 *           registers
 *  R18      Platform reg    Can we use it as a temporary register?
 *  R16,R17  IP0,IP1         May be used as temporary registers. Should be
 *                           given lower priority (i.e., we push them
 *                           onto the free-register stack before the others).
 *  R9-R15                   Temporary registers, caller-saved
 *  Note:
 *  R16 and R17 may be used by a linker as a scratch register between
 *  a routine and any subroutine it calls. They can also be used within a
 *  routine to hold intermediate values between subroutine calls.
 *
 *  The role of R18 is platform specific. If a platform ABI has need of
 *  a dedicated general purpose register to carry inter-procedural state
 *  (for example, the thread context) then it should use this register for
 *  that purpose. If the platform ABI has no such requirements, then it should
 *  use R18 as an additional temporary register. The platform ABI specification
 *  must document the usage for this register.
 *
 *  A subroutine invocation must preserve the contents of the registers R19-R29
 *  and SP. All 64 bits of each value stored in R19-R29 must be preserved, even
 *  when using the ILP32 data model.
 *
 *  $ 5.1.2 SIMD and Floating-Point Registers
 *
 *  The first eight registers, V0-V7, are used to pass argument values into
 *  a subroutine and to return result values from a function. They may also
 *  be used to hold intermediate values within a routine.
 *
 *  V8-V15 must be preserved by a callee across subroutine calls; the
 *  remaining registers do not need to be preserved (they are caller-saved).
 *  Additionally, only the bottom 64 bits of each value stored in V8-V15
 *  need to be preserved.
 */
} /* namespace maplebe */