/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "x64_cgfunc.h"
#include "becommon.h"
#include "abi.h"
#include "x64_call_conv.h"

namespace maplebe {
using namespace maple;
using namespace x64;

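/*
 * Classify an aggregate (already known to fit in four eightbytes) into
 * per-eightbyte argument classes, following the System V x86-64 ABI rules
 * quoted below. Since __m256 is assumed unsupported, every register-eligible
 * eightbyte is treated as INTEGER here. For example, a 16-byte
 * struct { long a; long b; } yields two INTEGER eightbytes and is passed in
 * two general purpose registers. Returns the eightbyte-rounded size.
 */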
int32 CCallConventionInfo::ClassifyAggregate(MIRType &mirType, uint64 sizeOfTy,
                                             std::vector<ArgumentClass> &classes) const
{
    /*
     * 1. If the size of an object is larger than four eightbytes, or it contains unaligned
     *    fields, it has class MEMORY;
     * 2. For processors that do not support the __m256 type, if the size of an object
     *    is larger than two eightbytes and the first eightbyte is not SSE or any other
     *    eightbyte is not SSEUP, it still has class MEMORY.
     * This in turn ensures that for processors that do support the __m256 type, if the size
     * of an object is four eightbytes, the first eightbyte is SSE, and all other eightbytes
     * are SSEUP, it can be passed in registers.
     * (Currently, assume that __m256 is not supported.)
     */
    if (sizeOfTy > k2EightBytesSize) {
        classes.push_back(kMemoryClass);
    } else if (sizeOfTy > k1EightBytesSize) {
        classes.push_back(kIntegerClass);
        classes.push_back(kIntegerClass);
    } else {
        classes.push_back(kIntegerClass);
    }
    return static_cast<int32>(sizeOfTy);
}

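/*
 * Map a MIR type to its argument class(es) and return the size that drives
 * register/stack allocation: scalars yield one class (two INTEGERs for
 * __int128), while aggregates are rounded up to eightbytes and either sent
 * to memory (more than four eightbytes) or handed to ClassifyAggregate.
 */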
int32 CCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
                                          std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_void:
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        /*
         * Arguments of type __int128 offer the same operations as INTEGERs,
         * yet they do not fit into one general purpose register but require
         * two registers.
         */
        case PTY_i128:
        case PTY_u128:
            classes.push_back(kIntegerClass);
            classes.push_back(kIntegerClass);
            return k16ByteSize;
        case PTY_f32:
        case PTY_f64:
            classes.push_back(kFloatClass);
            return k8ByteSize;
        case PTY_agg: {
            /*
             * The size of each argument gets rounded up to eightbytes;
             * therefore the stack will always be eightbyte aligned.
             */
            uint64 sizeOfTy = RoundUp(be.GetTypeSize(mirType.GetTypeIndex()), k8ByteSize);
            if (sizeOfTy == 0) {
                return 0;
            }
            /* If the size of an object is larger than four eightbytes, it has class MEMORY */
            if (sizeOfTy > k4EightBytesSize) {
                classes.push_back(kMemoryClass);
                return static_cast<int32>(sizeOfTy);
            }
            return ClassifyAggregate(mirType, sizeOfTy, classes);
        }
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}

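/*
 * WebKit JS calling convention: scalars keep their natural width, so 32-bit
 * types report k4ByteSize and 64-bit types k8ByteSize; aggregates are not
 * supported here.
 */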
int32 WebKitJSCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
                                                 std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        /*
         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
         * long, long long, and pointers are in the INTEGER class.
         */
        case PTY_void:
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
            classes.push_back(kIntegerClass);
            return k4ByteSize;
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        case PTY_f32:
            classes.push_back(kFloatClass);
            return k4ByteSize;
        case PTY_f64:
            classes.push_back(kFloatClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}

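/*
 * GHC calling convention: only integer-like scalars and pointers are
 * accepted, and each occupies a full eightbyte.
 */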
int32 GHCCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
                                            std::vector<ArgumentClass> &classes) const
{
    switch (mirType.GetPrimType()) {
        case PTY_u1:
        case PTY_u8:
        case PTY_i8:
        case PTY_u16:
        case PTY_i16:
        case PTY_a32:
        case PTY_u32:
        case PTY_i32:
        case PTY_a64:
        case PTY_ptr:
        case PTY_ref:
        case PTY_u64:
        case PTY_i64:
            classes.push_back(kIntegerClass);
            return k8ByteSize;
        default:
            CHECK_FATAL(false, "NYI");
    }
    return 0;
}

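/* Reset pLoc: no registers assigned yet, memory offset at the next free stack-argument slot. */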
void X64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const
{
    pLoc.reg0 = kRinvalid;
    pLoc.reg1 = kRinvalid;
    pLoc.reg2 = kRinvalid;
    pLoc.reg3 = kRinvalid;
    pLoc.memOffset = nextStackArgAdress;
    pLoc.fpSize = 0;
    pLoc.numFpPureRegs = 0;
}

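/*
 * Locate the next outgoing parameter: classify mirType, then allocate one GP
 * register (4/8-byte INTEGER), two GP registers (16-byte INTEGER), or one SSE
 * register (8-byte FLOAT). When no register is available, or the class is
 * MEMORY, the argument goes on the stack and nextStackArgAdress advances
 * past it.
 */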
int32 X64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc)
{
    InitCCLocInfo(pLoc);
    std::vector<ArgumentClass> classes {};
    int32 alignedTySize = GetCallConvInfo().Classification(beCommon, mirType, classes);
    if (alignedTySize == 0) {
        return 0;
    }
    pLoc.memSize = alignedTySize;
    ++paramNum;
    if (classes[0] == kIntegerClass) {
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.reg0 = AllocateGPParmRegister();
            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(), "RegNo should be paramRegNO");
        } else if (alignedTySize == k16ByteSize) {
            AllocateTwoGPParmRegisters(pLoc);
            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(), "RegNo should be paramRegNO");
        }
    } else if (classes[0] == kFloatClass) {
        if (alignedTySize == k8ByteSize) {
            pLoc.reg0 = AllocateSIMDFPRegister();
            DEBUG_ASSERT(nextGeneralParmRegNO <= kNumFloatParmRegs, "RegNo should be paramRegNO");
        } else {
            CHECK_FATAL(false, "NYI");
        }
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        /* being passed in memory */
        nextStackArgAdress = pLoc.memOffset + alignedTySize;
    }
    return 0;
}

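/*
 * Locate the return value of retType: INTEGER values use the register
 * sequence %rax, %rdx and SSE values use %xmm0 (a second SSE register is not
 * yet handled); MEMORY-class values are returned through caller-provided
 * storage whose address is passed in %rdi and returned in %rax, which is
 * only partially supported here.
 */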
int32 X64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc)
{
    InitCCLocInfo(pLoc);
    std::vector<ArgumentClass> classes {}; /* Max of four Regs. */
    uint32 alignedTySize = static_cast<uint32>(GetCallConvInfo().Classification(beCommon, retType, classes));
    if (alignedTySize == 0) {
        return 0; /* size 0 ret val */
    }
    if (classes[0] == kIntegerClass) {
        /* If the class is INTEGER, the next available register of the sequence %rax, %rdx is used. */
        CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs");
        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
            pLoc.regCount = kOneRegister;
            pLoc.reg0 = AllocateGPReturnRegister();
            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
                         "RegNo should be paramRegNO");
        } else if (alignedTySize == k16ByteSize) {
            pLoc.regCount = kTwoRegister;
            AllocateTwoGPReturnRegisters(pLoc);
            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
                         "RegNo should be paramRegNO");
        }
        if (nextGeneralReturnRegNO == kOneRegister) {
            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
        } else if (nextGeneralReturnRegNO == kTwoRegister) {
            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
            pLoc.primTypeOfReg1 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
        }
        return 0;
    } else if (classes[0] == kFloatClass) {
        /* If the class is SSE, the next available vector register of the sequence %xmm0, %xmm1 is used. */
        CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs");
        if (alignedTySize == k8ByteSize) {
            pLoc.regCount = kOneRegister;
            pLoc.reg0 = AllocateSIMDFPReturnRegister();
            DEBUG_ASSERT(nextFloatRetRegNO <= kNumFloatReturnRegs, "RegNo should be paramRegNO");
        } else if (alignedTySize == k16ByteSize) {
            CHECK_FATAL(false, "NYI");
        }
        if (nextFloatRetRegNO == kOneRegister) {
            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_f64 : retType.GetPrimType();
        } else if (nextFloatRetRegNO == kTwoRegister) {
            CHECK_FATAL(false, "NYI");
        }
        return 0;
    }
    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
        /*
         * The caller provides space for the return value and passes the
         * address of this storage in %rdi as if it were the first argument
         * to the function. In effect, this address becomes a "hidden" first
         * argument.
         * On return, %rax will contain the address that has been passed in
         * by the caller in %rdi.
         * Currently, this scenario is not fully supported.
         */
        pLoc.reg0 = AllocateGPReturnRegister();
        return 0;
    }
    CHECK_FATAL(false, "NYI");
    return 0;
}
} /* namespace maplebe */