1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "x64_MPISel.h"
17 #include "x64_memlayout.h"
18 #include "x64_cgfunc.h"
19 #include "x64_isa_tbl.h"
20 #include "x64_cg.h"
21 #include "isel.h"
22
23 namespace maplebe {
24 /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */
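/*
 * A rough illustration (assuming MapleIR's usual 1-based field numbering): for a symbol of type
 * struct { int32 a; int64 b; }, fieldId 0 resolves to the symbol's own primitive type, while a
 * non-zero fieldId selects that field's type and adds the field offset computed by
 * BECommon::GetFieldOffset to the symbol's base address.
 */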
25 MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const
26 {
27 PrimType symType;
28 int32 fieldOffset = 0;
29 if (fieldId == 0) {
30 symType = symbol.GetType()->GetPrimType();
31 } else {
32 MIRType *mirType = symbol.GetType();
33 DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure");
34 MIRStructType *structType = static_cast<MIRStructType *>(mirType);
35 symType = structType->GetFieldType(fieldId)->GetPrimType();
36 fieldOffset = static_cast<int32>(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first);
37 }
38 uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType);
39 return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset);
40 }
41 MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const
42 {
43 MIRStorageClass storageClass = symbol.GetStorageClass();
44 MemOperand *result = nullptr;
45 RegOperand *stackBaseReg = nullptr;
46 if ((storageClass == kScAuto) || (storageClass == kScFormal)) {
47 auto *symloc = static_cast<X64SymbolAlloc *>(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex()));
48 DEBUG_ASSERT(symloc != nullptr, "sym loc should have been defined");
49 stackBaseReg = static_cast<X64CGFunc *>(cgFunc)->GetBaseReg(*symloc);
50 int stOfst = cgFunc->GetBaseOffset(*symloc);
51 /* Create field symbols in aggregate structure */
52 result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
53 result->SetBaseRegister(*stackBaseReg);
54 result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm(k64BitSize, stOfst + offset));
55 CHECK_FATAL(result != nullptr, "NIY");
56 return *result;
57 }
58 if ((storageClass == kScGlobal) || (storageClass == kScExtern) || (storageClass == kScPstatic) ||
59 (storageClass == kScFstatic)) {
60 stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt);
61 result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
62 ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0);
63 result->SetBaseRegister(*stackBaseReg);
64 result->SetOffsetOperand(stOfstOpnd);
65 CHECK_FATAL(result != nullptr, "NIY");
66 return *result;
67 }
68 CHECK_FATAL(false, "NIY");
69 return *result;
70 }
71
72 void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd)
73 {
74 MIRType *retType = cgFunc->GetFunction().GetReturnType();
75 X64CallConvImpl retLocator(cgFunc->GetBecommon());
76 CCLocInfo retMech;
77 retLocator.LocateRetVal(*retType, retMech);
78 if (retMech.GetRegCount() == 0) {
79 return;
80 }
81 std::vector<RegOperand *> retRegs;
82 if (!cgFunc->GetFunction().StructReturnedInRegs() || retNode.Opnd(0)->GetOpCode() == OP_constval) {
83 PrimType oriPrimType = retMech.GetPrimTypeOfReg0();
84 regno_t retReg = retMech.GetReg0();
85 DEBUG_ASSERT(retReg != kRinvalid, "NIY");
86 RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, GetPrimTypeBitSize(oriPrimType),
87 cgFunc->GetRegTyFromPrimTy(oriPrimType));
88 retRegs.push_back(&retOpnd);
89 SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType());
90 } else {
91 CHECK_FATAL(opnd.IsMemoryAccessOperand(), "NIY");
92 MemOperand &memOpnd = static_cast<MemOperand &>(opnd);
93 ImmOperand *offsetOpnd = memOpnd.GetOffsetOperand();
94 RegOperand *baseOpnd = memOpnd.GetBaseRegister();
95
96 PrimType oriPrimType0 = retMech.GetPrimTypeOfReg0();
97 regno_t retReg0 = retMech.GetReg0();
98 DEBUG_ASSERT(retReg0 != kRinvalid, "NIY");
99 RegOperand &retOpnd0 = cgFunc->GetOpndBuilder()->CreatePReg(retReg0, GetPrimTypeBitSize(oriPrimType0),
100 cgFunc->GetRegTyFromPrimTy(oriPrimType0));
101 MemOperand &rhsMemOpnd0 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType0));
102 rhsMemOpnd0.SetBaseRegister(*baseOpnd);
103 rhsMemOpnd0.SetOffsetOperand(*offsetOpnd);
104 retRegs.push_back(&retOpnd0);
105 SelectCopy(retOpnd0, rhsMemOpnd0, oriPrimType0);
106
107 regno_t retReg1 = retMech.GetReg1();
108 if (retReg1 != kRinvalid) {
109 PrimType oriPrimType1 = retMech.GetPrimTypeOfReg1();
110 RegOperand &retOpnd1 = cgFunc->GetOpndBuilder()->CreatePReg(retReg1, GetPrimTypeBitSize(oriPrimType1),
111 cgFunc->GetRegTyFromPrimTy(oriPrimType1));
112 MemOperand &rhsMemOpnd1 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType1));
113 ImmOperand &newOffsetOpnd = static_cast<ImmOperand &>(*offsetOpnd->Clone(*cgFunc->GetMemoryPool()));
114 newOffsetOpnd.SetValue(newOffsetOpnd.GetValue() + GetPrimTypeSize(oriPrimType0));
115 rhsMemOpnd1.SetBaseRegister(*baseOpnd);
116 rhsMemOpnd1.SetOffsetOperand(newOffsetOpnd);
117 retRegs.push_back(&retOpnd1);
118 SelectCopy(retOpnd1, rhsMemOpnd1, oriPrimType1);
119 }
120 }
121 /* for optimization, insert pseudo ret, in case rax/rdx is removed */
122 SelectPseduoForReturn(retRegs);
123 }
124
125 void X64MPIsel::SelectPseduoForReturn(std::vector<RegOperand *> &retRegs)
126 {
127 for (auto retReg : retRegs) {
128 MOperator mop = x64::MOP_pseudo_ret_int;
129 Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]);
130 cgFunc->GetCurBB()->AppendInsn(pInsn);
131 pInsn.AddOpndChain(*retReg);
132 }
133 }
134
135 void X64MPIsel::SelectReturn()
136 {
137 /* jump to epilogue */
138 MOperator mOp = x64::MOP_jmpq_l;
139 LabelNode *endLabel = cgFunc->GetEndLabel();
140 auto endLabelName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx());
141 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx());
142 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
143 jmpInsn.AddOpndChain(targetOpnd);
144 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
145 cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB());
146 }
147
148 void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset)
149 {
150 int32 copyTime = static_cast<int32>(RoundUp(symSize, GetPointerSize()) / GetPointerSize());
151 for (int32 i = 0; i < copyTime; ++i) {
152 MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
153 addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister());
154 ImmOperand &newImmOpnd =
155 static_cast<ImmOperand &>(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool()));
156 newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize());
157 addrMemOpnd.SetOffsetOperand(newImmOpnd);
158 RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt);
159 Operand &stMemOpnd =
160 cgFunc->GetOpndBuilder()->CreateMem(spOpnd, (baseOffset + i * GetPointerSize()), k64BitSize);
161 SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64);
162 }
163 }
164
165 void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum)
166 {
167 CHECK_FATAL(parmNum < kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing");
168 RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt);
169 MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
170 addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister());
171 ImmOperand &newImmOpnd = static_cast<ImmOperand &>(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool()));
172 newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize());
173 addrMemOpnd.SetOffsetOperand(newImmOpnd);
174 paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64});
175 }
176
177 std::tuple<Operand *, size_t, MIRType *> X64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr)
178 {
179 /* get mirType info */
180 auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr);
181 MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType);
182 /* get symbol memOpnd info */
183 MemOperand *symMemOpnd = nullptr;
184 if (argExpr.GetOpCode() == OP_dread) {
185 AddrofNode &dread = static_cast<AddrofNode &>(argExpr);
186 MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx());
187 CHECK_NULL_FATAL(symbol);
188 symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID());
189 } else if (argExpr.GetOpCode() == OP_iread) {
190 IreadNode &iread = static_cast<IreadNode &>(argExpr);
191 symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset);
192 } else {
193 CHECK_FATAL(false, "unsupported opcode");
194 }
195 return {symMemOpnd, symInfo.size, mirType};
196 }
197
198 void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused)
199 {
200 auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr);
201 DEBUG_ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd");
202 MemOperand &memOpnd = static_cast<MemOperand &>(*argOpnd);
203
204 CCLocInfo ploc;
205 parmLocator.LocateNextParm(*mirType, ploc);
206 if (isArgUnused) {
207 return;
208 }
209
210 /* create call struct param pass */
211 if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) {
212 CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset);
213 } else {
214 CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state");
215 CreateCallStructParamPassByReg(memOpnd, ploc.reg0, kFirstReg);
216 if (ploc.reg1 != kRinvalid) {
217 CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondOpnd);
218 }
219 if (ploc.reg2 != kRinvalid) {
220 CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdOpnd);
221 }
222 if (ploc.reg3 != kRinvalid) {
223 CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthOpnd);
224 }
225 }
226 }
227
228 /*
229  * SelectParmList generates an instruction for each parameter to load the
230  * parameter value into the corresponding register or stack slot.
231  * We return a list of registers to the call instruction because
232  * they may be needed in the register allocation phase.
233  * fpNum is an output parameter holding the number of vector (floating-point)
234  * registers used.
235  */
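/*
 * A rough example (assuming the SysV x86-64 convention implemented by X64CallConvImpl): for a call
 * f(int32 a, double b, <32-byte struct> s), a is loaded into the first integer argument register,
 * b into the first vector register (so fpNum becomes 1), and s is copied to the outgoing argument
 * area on the stack via SelectParmListForAggregate.
 */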
236 void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum)
237 {
238 paramPassByReg.clear();
239 fpNum = 0;
240 /* for IcallNode, the 0th operand is the function pointer */
241 size_t argBegin = 0;
242 if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
243 ++argBegin;
244 }
245
246 MIRFunction *callee = nullptr;
247 if (naryNode.GetOpCode() == OP_call) {
248 PUIdx calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
249 callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
250 }
251 X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(naryNode));
252 CCLocInfo ploc;
253 for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) {
254 BaseNode *argExpr = naryNode.Opnd(i);
255 DEBUG_ASSERT(argExpr != nullptr, "not null check");
256 PrimType primType = argExpr->GetPrimType();
257 DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
258 bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i));
259 if (primType == PTY_agg) {
260 SelectParmListForAggregate(*argExpr, parmLocator, isArgUnused);
261 continue;
262 }
263
264 Operand *argOpnd = HandleExpr(naryNode, *argExpr);
265 DEBUG_ASSERT(argOpnd != nullptr, "not null check");
266 MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
267 parmLocator.LocateNextParm(*mirType, ploc);
268
269 /* skip unused args */
270 if (isArgUnused) {
271 continue;
272 }
273
274 if (ploc.reg0 != x64::kRinvalid) {
275 /* load to the register. */
276 RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, GetPrimTypeBitSize(primType),
277 cgFunc->GetRegTyFromPrimTy(primType));
278 paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType});
279 if (x64::IsFPSIMDRegister(static_cast<X64reg>(ploc.reg0))) {
280 fpNum++;
281 }
282 } else {
283 /* load to stack memory */
284 RegOperand &baseOpnd =
285 cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
286 MemOperand &actMemOpnd =
287 cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, GetPrimTypeBitSize(primType));
288 SelectCopy(actMemOpnd, *argOpnd, primType);
289 }
290 DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NIY");
291 }
292
293 /* param pass by reg */
294 for (auto [regOpnd, argOpnd, primType] : paramPassByReg) {
295 DEBUG_ASSERT(regOpnd != nullptr, "not null check");
296 DEBUG_ASSERT(argOpnd != nullptr, "not null check");
297 SelectCopy(*regOpnd, *argOpnd, primType);
298 srcOpnds.PushOpnd(*regOpnd);
299 }
300 }
301
302 RegOperand &X64MPIsel::SelectSpecialRegread(PregIdx pregIdx, PrimType primType)
303 {
304 switch (-pregIdx) {
305 case kSregFp: {
306 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RFP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
307 }
308 case kSregSp: {
309 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
310 }
311 default: {
312 CHECK_FATAL(false, "ERROR: Not supported special register!");
313 }
314 }
315 }
316
317 bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol)
318 {
319 if (symbol.GetStorageClass() == kScFormal &&
320 cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) {
321 return true;
322 }
323 return false;
324 }
325
326 void X64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize)
327 {
328 CHECK_FATAL((aggSize > 0) && (aggSize <= k16ByteSize), "out of range.");
329 RegOperand *baseOpnd = symbolMem.GetBaseRegister();
330 int32 stOffset = symbolMem.GetOffsetOperand()->GetValue();
331 bool isCopyOneReg = (aggSize <= k8ByteSize);
332 uint32 extraSize = (aggSize % k8ByteSize) * kBitsPerByte;
333 if (extraSize == 0) {
334 extraSize = k64BitSize;
335 } else if (extraSize <= k8BitSize) {
336 extraSize = k8BitSize;
337 } else if (extraSize <= k16BitSize) {
338 extraSize = k16BitSize;
339 } else if (extraSize <= k32BitSize) {
340 extraSize = k32BitSize;
341 } else {
342 extraSize = k64BitSize;
343 }
344 /* generate move from return registers(rax, rdx) to mem of symbol */
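/* e.g. (sketch): for a 12-byte aggregate, the low 8 bytes are stored from %rax and the remaining
 * 4 bytes are stored from %edx at offset 8. */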
345 PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraSize);
346 /* mov %rax mem */
347 RegOperand ®Rhs0 =
348 cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, (isCopyOneReg ? extraSize : k64BitSize), kRegTyInt);
349 MemOperand &memSymbo0 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast<int32>(stOffset),
350 isCopyOneReg ? extraSize : k64BitSize);
351 SelectCopy(memSymbo0, regRhs0, isCopyOneReg ? extraTy : PTY_u64);
352 /* mov %rdx mem */
353 if (!isCopyOneReg) {
354 RegOperand ®Rhs1 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, extraSize, kRegTyInt);
355 MemOperand &memSymbo1 =
356 cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast<int32>(stOffset + k8ByteSize), extraSize);
357 SelectCopy(memSymbo1, regRhs1, extraTy);
358 }
359 return;
360 }
361
362 void X64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize)
363 {
364 /* on x86-64, data is copied 8 bytes at a time */
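/* e.g. (sketch): a 20-byte copy becomes two 8-byte mem-to-mem copies plus one 4-byte copy for the
 * tail; copies of 40 bytes or more fall back to the memcpy libcall below. */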
365 uint32 copyTimes = copySize / k8ByteSize;
366 uint32 extraCopySize = copySize % k8ByteSize;
367 ImmOperand *stOfstLhs = lhs.GetOffsetOperand();
368 ImmOperand *stOfstRhs = rhs.GetOffsetOperand();
369 RegOperand *baseLhs = lhs.GetBaseRegister();
370 RegOperand *baseRhs = rhs.GetBaseRegister();
371 if (copySize < 40U) {
372 for (uint32 i = 0; i < copyTimes; ++i) {
373 /* prepare dest addr */
374 MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
375 memOpndLhs.SetBaseRegister(*baseLhs);
376 ImmOperand &newStOfstLhs = static_cast<ImmOperand &>(*stOfstLhs->Clone(*cgFunc->GetMemoryPool()));
377 newStOfstLhs.SetValue(newStOfstLhs.GetValue() + i * k8ByteSize);
378 memOpndLhs.SetOffsetOperand(newStOfstLhs);
379 /* prepare src addr */
380 MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
381 memOpndRhs.SetBaseRegister(*baseRhs);
382 ImmOperand &newStOfstRhs = static_cast<ImmOperand &>(*stOfstRhs->Clone(*cgFunc->GetMemoryPool()));
383 newStOfstRhs.SetValue(newStOfstRhs.GetValue() + i * k8ByteSize);
384 memOpndRhs.SetOffsetOperand(newStOfstRhs);
385 /* copy data */
386 SelectCopy(memOpndLhs, memOpndRhs, PTY_a64);
387 }
388 } else {
389 /* adopt memcpy */
390 std::vector<Operand *> opndVec;
391 opndVec.push_back(PrepareMemcpyParm(lhs, MOP_leaq_m_r));
392 opndVec.push_back(PrepareMemcpyParm(rhs, MOP_leaq_m_r));
393 opndVec.push_back(PrepareMemcpyParm(copySize));
394 SelectLibCall("memcpy", opndVec, PTY_a64, nullptr, PTY_void);
395 return;
396 }
397 /* copy the trailing bytes that are smaller than one 8-byte unit */
398 if (extraCopySize == 0) {
399 return;
400 }
401 extraCopySize = ((extraCopySize <= k4ByteSize) ? k4ByteSize : k8ByteSize) * kBitsPerByte;
402 PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraCopySize);
403 MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
404 memOpndLhs.SetBaseRegister(*baseLhs);
405 ImmOperand &newStOfstLhs = static_cast<ImmOperand &>(*stOfstLhs->Clone(*cgFunc->GetMemoryPool()));
406 newStOfstLhs.SetValue(newStOfstLhs.GetValue() + copyTimes * k8ByteSize);
407 memOpndLhs.SetOffsetOperand(newStOfstLhs);
408 MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
409 memOpndRhs.SetBaseRegister(*baseRhs);
410 ImmOperand &newStOfstRhs = static_cast<ImmOperand &>(*stOfstRhs->Clone(*cgFunc->GetMemoryPool()));
411 newStOfstRhs.SetValue(newStOfstRhs.GetValue() + copyTimes * k8ByteSize);
412 memOpndRhs.SetOffsetOperand(newStOfstRhs);
413 SelectCopy(memOpndLhs, memOpndRhs, extraTy);
414 }
415
416 void X64MPIsel::SelectLibCall(const std::string &funcName, std::vector<Operand *> &opndVec, PrimType primType,
417 Operand *retOpnd, PrimType retType)
418 {
419 /* generate libcall */
420 std::vector<PrimType> pt(opndVec.size(), primType);
421 SelectLibCallNArg(funcName, opndVec, pt, retOpnd, retType);
422 return;
423 }
424
425 void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector<Operand *> &opndVec,
426 std::vector<PrimType> pt, Operand *retOpnd, PrimType retPrimType)
427 {
428 std::string newName = funcName;
429 MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
430 st->SetNameStrIdx(newName);
431 st->SetStorageClass(kScExtern);
432 st->SetSKind(kStFunc);
433
434 /* setup the type of the callee function */
435 std::vector<TyIdx> vec;
436 std::vector<TypeAttrs> vecAt;
437 for (size_t i = 0; i < opndVec.size(); ++i) {
438 vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast<size_t>(pt[i])]->GetTypeIndex());
439 vecAt.emplace_back(TypeAttrs());
440 }
441
442 /* set up the return type of the callee function */
443 MIRType *mirRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast<size_t>(retPrimType));
444 CHECK_NULL_FATAL(mirRetType);
445 st->SetTyIdx(
446 cgFunc->GetBecommon().BeGetOrCreateFunctionType(mirRetType->GetTypeIndex(), vec, vecAt)->GetTypeIndex());
447
448 /* setup actual parameters */
449 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
450
451 X64CallConvImpl parmLocator(cgFunc->GetBecommon());
452 CCLocInfo ploc;
453 for (size_t i = 0; i < opndVec.size(); ++i) {
454 DEBUG_ASSERT(pt[i] != PTY_void, "primType check");
455 MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<size_t>(pt[i])];
456 Operand *stOpnd = opndVec[i];
457 DEBUG_ASSERT(stOpnd->IsRegister(), "exp result should be reg");
458 RegOperand *expRegOpnd = static_cast<RegOperand *>(stOpnd);
459 parmLocator.LocateNextParm(*ty, ploc);
460 if (ploc.reg0 != 0) { /* load to the register */
461 RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, expRegOpnd->GetSize(),
462 cgFunc->GetRegTyFromPrimTy(pt[i]));
463 SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]);
464 paramOpnds.PushOpnd(parmRegOpnd);
465 }
466 DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
467 }
468
469 MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false);
470 Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym);
471 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
472 Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
473
474 /* the call has no return value */
475 if (retOpnd == nullptr) {
476 return;
477 }
478
479 CCLocInfo retMech;
480 parmLocator.LocateRetVal(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech);
481 if (retMech.GetRegCount() <= 0 || retMech.GetRegCount() > 1) {
482 CHECK_FATAL(false, "just support one register return");
483 }
484 if (mirRetType != nullptr) {
485 callInsn.SetRetSize(static_cast<uint32>(mirRetType->GetSize()));
486 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(mirRetType->GetPrimType()));
487 }
488 CHECK_FATAL(retOpnd->IsRegister(), "niy");
489 RegOperand *regOpnd = static_cast<RegOperand *>(retOpnd);
490 regno_t retRegNo = retMech.GetReg0();
491 if (regOpnd->GetRegisterNumber() != retRegNo) {
492 RegOperand &phyRetOpnd =
493 cgFunc->GetOpndBuilder()->CreatePReg(retRegNo, regOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(retPrimType));
494 SelectCopy(*retOpnd, phyRetOpnd, retPrimType);
495 }
496 return;
497 }
498
499 Operand *X64MPIsel::SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const
500 {
501 CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const");
502 uint32 labelIdxTmp = cgFunc->GetLabelIdx();
503 Operand *result = nullptr;
504 if (primType == PTY_f64) {
505 result = SelectLiteral(static_cast<MIRDoubleConst &>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
506 } else {
507 result = SelectLiteral(static_cast<MIRFloatConst &>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
508 }
509 cgFunc->SetLabelIdx(labelIdxTmp);
510 return result;
511 }
512
513 RegOperand *X64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp)
514 {
515 RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
516 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
517 addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult);
518 cgFunc->GetCurBB()->AppendInsn(addrInsn);
519 return ®Result;
520 }
521
522 RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize)
523 {
524 RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
525 ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize);
526 SelectCopy(regResult, sizeOpnd, PTY_i64);
527 return ®Result;
528 }
529
530 void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs)
531 {
532 /* rhs is a function return value; it must come from a regread */
533 if (opndRhs.IsRegister()) {
534 SelectIntAggCopyReturn(symbolMem, lhsInfo.size);
535 return;
536 }
537 /* In general, rhs comes from a dread/iread */
538 CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem");
539 MemOperand &memRhs = static_cast<MemOperand &>(opndRhs);
540 SelectAggCopy(symbolMem, memRhs, lhsInfo.size);
541 }
542
543 void X64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs)
544 {
545 /* mirSymbol info */
546 MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt);
547 MIRType *stmtMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
548
549 /* In general, RHS comes from a dread/iread */
550 CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem");
551 MemOperand &memRhs = static_cast<MemOperand &>(opndRhs);
552 ImmOperand *stOfstSrc = memRhs.GetOffsetOperand();
553 RegOperand *baseSrc = memRhs.GetBaseRegister();
554
555 if (stmtMirType->GetPrimType() == PTY_agg) {
556 /* generate move to regs for agg return */
557 RegOperand *result[kFourRegister] = {nullptr}; /* up to 2 int or 4 fp */
558 uint32 numRegs = (symbolInfo.size <= k8ByteSize) ? kOneRegister : kTwoRegister;
559 PrimType retPrimType = (symbolInfo.size <= k4ByteSize) ? PTY_u32 : PTY_u64;
560 for (uint32 i = 0; i < numRegs; i++) {
561 MemOperand &rhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(retPrimType));
562 rhsMemOpnd.SetBaseRegister(*baseSrc);
563 ImmOperand &newStOfstSrc = static_cast<ImmOperand &>(*stOfstSrc->Clone(*cgFunc->GetMemoryPool()));
564 newStOfstSrc.SetValue(newStOfstSrc.GetValue() + i * k8ByteSize);
565 rhsMemOpnd.SetOffsetOperand(newStOfstSrc);
566 regno_t regNo = (i == 0) ? x64::RAX : x64::RDX;
567 result[i] = &cgFunc->GetOpndBuilder()->CreatePReg(regNo, GetPrimTypeBitSize(retPrimType),
568 cgFunc->GetRegTyFromPrimTy(retPrimType));
569 SelectCopy(*(result[i]), rhsMemOpnd, retPrimType);
570 }
571 } else {
572 RegOperand *lhsAddrOpnd = &SelectCopy2Reg(AddrOpnd, stmt.Opnd(0)->GetPrimType());
573 MemOperand &symbolMem =
574 cgFunc->GetOpndBuilder()->CreateMem(*lhsAddrOpnd, symbolInfo.offset, GetPrimTypeBitSize(PTY_u64));
575 SelectAggCopy(symbolMem, memRhs, symbolInfo.size);
576 }
577 }
578
579 Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand &paramOpnds, ListOperand &retOpnds)
580 {
581 Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
582 callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds);
583 cgFunc->GetCurBB()->AppendInsn(callInsn);
584 cgFunc->GetCurBB()->SetHasCall();
585 cgFunc->GetFunction().SetHasCall();
586 return callInsn;
587 }
588
589 void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds)
590 {
591 if (retType == nullptr) {
592 return;
593 }
594 auto retSize = retType->GetSize() * kBitsPerByte;
595 if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) {
596 if (retSize > k0BitSize) {
597 retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt));
598 }
599 if (retSize > k64BitSize) {
600 retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt));
601 }
602 }
603 }
604
605 void X64MPIsel::SelectCall(CallNode &callNode)
606 {
607 MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
608 MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false);
609 Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym);
610
611 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
612 uint32 fpNum = 0;
613 SelectParmList(callNode, paramOpnds, fpNum);
614 /* x64 ABI: for calls to variadic functions, rax passes the number of vector registers used for arguments */
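/* e.g. (sketch): for a variadic callee such as printf("%f\n", d), one vector register is used for
 * the argument d, so this emits the equivalent of "mov $1, %rax" before the call. */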
615 if (fn->IsVarargs()) {
616 ImmOperand &fpNumImm = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, fpNum);
617 RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt);
618 SelectCopy(raxOpnd, fpNumImm, PTY_i64);
619 }
620
621 MIRType *retType = fn->GetReturnType();
622 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
623 SelectCalleeReturn(retType, retOpnds);
624
625 Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
626 if (retType != nullptr) {
627 callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
628 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
629 }
630 const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
631 for (const auto &elem : deoptBundleInfo) {
632 auto valueKind = elem.second.GetMapleValueKind();
633 if (valueKind == MapleValue::kPregKind) {
634 auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx());
635 CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
636 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
637 } else if (valueKind == MapleValue::kConstKind) {
638 auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
639 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
640 } else {
641 CHECK_FATAL(false, "not supported currently");
642 }
643 }
644 cgFunc->AppendStackMapInsn(callInsn);
645 }
646
647 void X64MPIsel::SelectIcall(IcallNode &iCallNode)
648 {
649 Operand *opnd0 = HandleExpr(iCallNode, *iCallNode.GetNopndAt(0));
650 RegOperand &targetOpnd = SelectCopy2Reg(*opnd0, iCallNode.Opnd(0)->GetPrimType());
651 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
652 uint32 fpNum = 0;
653 SelectParmList(iCallNode, paramOpnds, fpNum);
654
655 MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx());
656 if (iCallNode.GetOpCode() == OP_icallproto) {
657 CHECK_FATAL((retType->GetKind() == kTypeFunction), "NIY, must be func");
658 auto calleeType = static_cast<MIRFuncType *>(retType);
659 retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx());
660 }
661 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
662 SelectCalleeReturn(retType, retOpnds);
663
664 Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds);
665 if (retType != nullptr) {
666 callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
667 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
668 }
669 const auto &deoptBundleInfo = iCallNode.GetDeoptBundleInfo();
670 for (const auto &elem : deoptBundleInfo) {
671 auto valueKind = elem.second.GetMapleValueKind();
672 if (valueKind == MapleValue::kPregKind) {
673 auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx());
674 CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
675 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
676 } else if (valueKind == MapleValue::kConstKind) {
677 auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
678 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
679 } else {
680 CHECK_FATAL(false, "not supported currently");
681 }
682 }
683 cgFunc->AppendStackMapInsn(callInsn);
684 }
685
686 Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg)
687 {
688 return GetTargetRetOperand(primType, sReg);
689 }
690
691 void X64MPIsel::SelectGoto(GotoNode &stmt)
692 {
693 MOperator mOp = x64::MOP_jmpq_l;
694 auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
695 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
696 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
697 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
698 jmpInsn.AddOpndChain(targetOpnd);
699 cgFunc->GetCurBB()->SetKind(BB::kBBGoto);
700 return;
701 }
702
703 void X64MPIsel::SelectIgoto(Operand &opnd0)
704 {
705 CHECK_FATAL(opnd0.IsRegister(), "only register implemented!");
706 MOperator mOp = x64::MOP_jmpq_r;
707 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
708 jmpInsn.AddOpndChain(opnd0);
709 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
710 return;
711 }
712
713 /* This function generates inline code that initializes the va_list data structure */
714 /* type $__va_list <struct {
715 @__stack <* void> align(8),
716 @__gr_top <* void> align(8),
717 @__vr_top <* void> align(8),
718 @__gr_offs i32 align(4),
719 @__vr_offs i32 align(4)}>
720 }
721 */
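/*
 * A rough sketch of the byte offsets written into the va_list object below:
 *   +0 __stack, +8 __gr_top, +16 __vr_top (8-byte stores via MOP_movq_r_m),
 *   +24 __gr_offs, +28 __vr_offs (4-byte stores via MOP_movl_r_m).
 */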
722 void X64MPIsel::GenCVaStartIntrin(RegOperand &opnd, uint32 stkOffset)
723 {
724 /* FP/LR are only pushed in regalloc(), which runs after this intrinsic is expanded */
725 RegOperand &fpOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RFP, k64BitSize, kRegTyInt);
726
727 uint32 fpLrLength = k16BitSize;
728 /* __stack */
729 if (stkOffset != 0) {
730 stkOffset += fpLrLength;
731 }
732
733 /* isvary reset StackFrameSize */
734 ImmOperand &vaListOnPassArgStackOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
735 RegOperand &vReg = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
736 SelectAdd(vReg, fpOpnd, vaListOnPassArgStackOffset, GetLoweredPtrType());
737
738 // 8-byte fields in the va_list structure use this mop.
739 MOperator mOp = x64::MOP_movq_r_m;
740
741 /* mem operand in va_list struct (lhs) */
742 MemOperand &vaList = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, 0, k64BitSize);
743 Insn &fillInStkOffsetInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
744 fillInStkOffsetInsn.AddOpndChain(vReg).AddOpndChain(vaList);
745 cgFunc->GetCurBB()->AppendInsn(fillInStkOffsetInsn);
746
747 /* __gr_top ; it's the same as __stack before the 1st va_arg */
748 stkOffset = 0;
749 ImmOperand &grTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
750 SelectSub(vReg, fpOpnd, grTopOffset, PTY_a64);
751
752 /* mem operand in va_list struct (lhs) */
753 MemOperand &vaListGRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k8BitSize, k64BitSize);
754 Insn &fillInGRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
755 fillInGRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListGRTop);
756 cgFunc->GetCurBB()->AppendInsn(fillInGRTopInsn);
757
758 /* __vr_top */
759 int32 grAreaSize = static_cast<int32>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfGRSaveArea());
760 stkOffset += static_cast<uint32>(grAreaSize);
761 stkOffset += k8BitSize;
762 ImmOperand &vaListVRTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
763 SelectSub(vReg, fpOpnd, vaListVRTopOffset, PTY_a64);
764
765 MemOperand &vaListVRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k16BitSize, k64BitSize);
766 Insn &fillInVRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
767 fillInVRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListVRTop);
768 cgFunc->GetCurBB()->AppendInsn(fillInVRTopInsn);
769
770 // 4-byte fields in the va_list structure use this mop.
771 mOp = x64::MOP_movl_r_m;
772
773 /* __gr_offs */
774 int32 grOffs = 0 - grAreaSize;
775 ImmOperand &vaListGROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, grOffs);
776 RegOperand &grOffsRegOpnd = SelectCopy2Reg(vaListGROffsOffset, PTY_a32);
777
778 MemOperand &vaListGROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize, k64BitSize);
779 Insn &fillInGROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
780 fillInGROffsInsn.AddOpndChain(grOffsRegOpnd).AddOpndChain(vaListGROffs);
781 cgFunc->GetCurBB()->AppendInsn(fillInGROffsInsn);
782
783 /* __vr_offs */
784 int32 vrOffs = static_cast<int32>(
785 0UL - static_cast<int32>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfVRSaveArea()));
786 ImmOperand &vaListVROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, vrOffs);
787 RegOperand &vrOffsRegOpnd = SelectCopy2Reg(vaListVROffsOffset, PTY_a32);
788
789 MemOperand &vaListVROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize + 4, k64BitSize);
790 Insn &fillInVROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
791 fillInVROffsInsn.AddOpndChain(vrOffsRegOpnd).AddOpndChain(vaListVROffs);
792 cgFunc->GetCurBB()->AppendInsn(fillInVROffsInsn);
793 }
794
795 void X64MPIsel::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
796 {
797 DEBUG_ASSERT(intrnNode.NumOpnds() == kOpndNum2, "must be 2 operands");
798 MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
799 // add
800 PrimType type = intrnNode.Opnd(0)->GetPrimType();
801 PrimType type2 = intrnNode.Opnd(1)->GetPrimType();
802 CHECK_FATAL(type == PTY_i32 || type == PTY_u32, "only support i32 or u32 here");
803 CHECK_FATAL(type2 == PTY_i32 || type2 == PTY_u32, "only support i32 or u32 here");
804 RegOperand &opnd0 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
805 intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
806 RegOperand &opnd1 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
807 intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */
808 RegOperand &resReg =
809 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), cgFunc->GetRegTyFromPrimTy(type));
810 if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
811 SelectAdd(resReg, opnd0, opnd1, type);
812 } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
813 SelectSub(resReg, opnd0, opnd1, type);
814 } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
815 SelectMpy(resReg, opnd0, opnd1, type);
816 } else {
817 CHECK_FATAL(false, "niy");
818 }
819
820 // store
821 auto *p2nrets = &intrnNode.GetReturnVec();
822 if (p2nrets->size() == k1ByteSize) {
823 StIdx stIdx = (*p2nrets)[0].first;
824 CHECK_NULL_FATAL(cgFunc->GetBecommon().GetMIRModule().CurFunction());
825 MIRSymbol *sym =
826 cgFunc->GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
827 DEBUG_ASSERT(sym != nullptr, "nullptr check");
828 MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*sym, 1);
829 MemOperand &memOperand2 = GetOrCreateMemOpndFromSymbol(*sym, 2);
830 SelectCopy(memOperand, resReg, type);
831 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_seto_m, X64CG::kMd[MOP_seto_m]);
832 insn.AddOpndChain(memOperand2);
833 cgFunc->GetCurBB()->AppendInsn(insn);
834 } else {
835 CHECK_FATAL(false, "should not happen");
836 }
837 return;
838 }
839
840 /* The second parameter of va_start does not need to be handled here;
841  * it is mainly used in the prologue/epilogue */
842 void X64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode)
843 {
844 DEBUG_ASSERT(intrnNode.NumOpnds() == kOpndNum2, "must be 2 operands");
845 /* 2 operands, but only 1 needed. Don't need to emit code for second operand
846 *
847 * va_list is passed as the address of a struct; load that address
848 */
849 BaseNode *argExpr = intrnNode.Opnd(0);
850 Operand *opnd = HandleExpr(intrnNode, *argExpr);
851 RegOperand &opnd0 = SelectCopy2Reg(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */
852
853 /* Find beginning of unnamed arg on stack.
854 * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...)
855 * where struct S has size 32, address of r and s are on stack but they are named.
856 */
857 X64CallConvImpl parmLocator(cgFunc->GetBecommon());
858 CCLocInfo pLoc;
859 uint32 stkSize = 0;
860 for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); i++) {
861 MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(cgFunc->GetFunction().GetNthParamTyIdx(i));
862 parmLocator.LocateNextParm(*ty, pLoc);
863 if (pLoc.reg0 == kRinvalid) { /* on stack */
864 stkSize = static_cast<uint32_t>(pLoc.memOffset + pLoc.memSize);
865 }
866 }
867
868 stkSize = static_cast<uint32>(RoundUp(stkSize, GetPointerSize()));
869
870 GenCVaStartIntrin(opnd0, stkSize);
871
872 return;
873 }
874
875 void X64MPIsel::SelectIntrinsicCall(IntrinsiccallNode &intrinsiccallNode)
876 {
877 MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();
878 if (intrinsic == INTRN_C_va_start) {
879 SelectCVaStart(intrinsiccallNode);
880 return;
881 }
882 if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) {
883 return;
884 }
885 // JS
886 if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW ||
887 intrinsic == INTRN_MUL_WITH_OVERFLOW) {
888 SelectOverFlowCall(intrinsiccallNode);
889 return;
890 }
891
892 CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic));
893 }
894
895 void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd)
896 {
897 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
898 std::vector<uint64> sizeArray;
899 const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
900 sizeArray.emplace_back(switchTable.size());
901 MemPool *memPool = cgFunc->GetMemoryPool();
902 MIRArrayType *arrayType = memPool->New<MIRArrayType>(etype->GetTypeIndex(), sizeArray);
903 MIRAggConst *arrayConst = memPool->New<MIRAggConst>(cgFunc->GetMirModule(), *arrayType);
904 for (const auto &itPair : switchTable) {
905 LabelIdx labelIdx = itPair.second;
906 cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx);
907 MIRConst *mirConst = memPool->New<MIRLblConst>(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype);
908 arrayConst->AddItem(mirConst, 0);
909 }
910 MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
911 lblSt->SetStorageClass(kScFstatic);
912 lblSt->SetSKind(kStConst);
913 lblSt->SetTyIdx(arrayType->GetTypeIndex());
914 lblSt->SetKonst(arrayConst);
915 std::string lblStr(".L_");
916 uint32 labelIdxTmp = cgFunc->GetLabelIdx();
917 lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++));
918 cgFunc->SetLabelIdx(labelIdxTmp);
919 lblSt->SetNameStrIdx(lblStr);
920 cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt);
921 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0);
922 /* get index */
923 PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType();
924 RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType);
925 int32 minIdx = switchTable[0].first;
926 ImmOperand &opnd1 =
927 cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), -minIdx - rangeGotoNode.GetTagOffset());
928 RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt);
929 SelectAdd(indexOpnd, opnd0, opnd1, srcType);
930
931 /* load the displacement into a register by accessing memory at base + index * 8 */
932 /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */
933 MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64));
934 RegOperand &baseReg = cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt);
935 dstMemOpnd.SetBaseRegister(baseReg);
936 dstMemOpnd.SetIndexRegister(indexOpnd);
937 dstMemOpnd.SetOffsetOperand(stOpnd);
938 dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize));
939
940 /* jumping to the absolute address which is stored in dstRegOpnd */
941 MOperator mOp = x64::MOP_jmpq_m;
942 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
943 jmpInsn.AddOpndChain(dstMemOpnd);
944 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
945 }
946
947 Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent)
948 {
949 /* get mirSymbol info */
950 MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());
951 /* <prim-type> of AddrofNode must be either ptr, a32 or a64 */
952 PrimType ptype = expr.GetPrimType();
953 RegOperand &resReg =
954 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(ptype), cgFunc->GetRegTyFromPrimTy(ptype));
955 CHECK_NULL_FATAL(symbol);
956 MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID());
957 uint32 pSize = GetPrimTypeSize(ptype);
958 MOperator mOp;
959 if (pSize <= k4ByteSize) {
960 mOp = x64::MOP_leal_m_r;
961 } else if (pSize <= k8ByteSize) {
962 mOp = x64::MOP_leaq_m_r;
963 } else {
964 CHECK_FATAL(false, "NIY");
965 }
966 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
967 addrInsn.AddOpndChain(memOperand).AddOpndChain(resReg);
968 cgFunc->GetCurBB()->AppendInsn(addrInsn);
969 return &resReg;
970 }
971
972 Operand *X64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent)
973 {
974 uint32 instrSize = static_cast<uint32>(expr.SizeOfInstr());
975 /* <prim-type> must be either a32 or a64. */
976 PrimType primType = (instrSize == k8ByteSize) ? PTY_a64 : (instrSize == k4ByteSize) ? PTY_a32 : PTY_begin;
977 CHECK_FATAL(primType != PTY_begin, "prim-type of Func Addr must be either a32 or a64!");
978 MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx());
979 MIRSymbol *symbol = mirFunction->GetFuncSymbol();
980 CHECK_NULL_FATAL(symbol);
981 MIRStorageClass storageClass = symbol->GetStorageClass();
982 RegOperand &resReg =
983 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType));
984 if (storageClass == maple::kScText && symbol->GetSKind() == maple::kStFunc) {
985 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*symbol, 0, 0);
986 X64MOP_t mOp = x64::MOP_movabs_i_r;
987 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
988 addrInsn.AddOpndChain(stOpnd).AddOpndChain(resReg);
989 cgFunc->GetCurBB()->AppendInsn(addrInsn);
990 } else {
991 CHECK_FATAL(false, "NIY");
992 }
993 return &resReg;
994 }
995
996 Operand *X64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent)
997 {
998 PrimType primType = expr.GetPrimType();
999 uint32 bitSize = GetPrimTypeBitSize(primType);
1000 RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1001 RegOperand &baseOpnd =
1002 cgFunc->GetOpndBuilder()->CreatePReg(x64::RIP, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1003
1004 auto labelStr = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(expr.GetOffset());
1005 MIRSymbol *labelSym = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
1006 DEBUG_ASSERT(labelSym != nullptr, "null ptr check");
1007 labelSym->SetStorageClass(kScFstatic);
1008 labelSym->SetSKind(kStConst);
1009 labelSym->SetNameStrIdx(labelStr);
1010 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
1011 DEBUG_ASSERT(etype != nullptr, "null ptr check");
1012 auto *labelConst =
1013 cgFunc->GetMemoryPool()->New<MIRLblConst>(expr.GetOffset(), cgFunc->GetFunction().GetPuidx(), *etype);
1014 DEBUG_ASSERT(labelConst != nullptr, "null ptr check");
1015 labelSym->SetKonst(labelConst);
1016 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0);
1017
1018 MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(bitSize);
1019 memOpnd.SetBaseRegister(baseOpnd);
1020 memOpnd.SetOffsetOperand(stOpnd);
1021
1022 X64MOP_t mOp = x64::MOP_begin;
1023 if (bitSize <= k32BitSize) {
1024 mOp = x64::MOP_leal_m_r;
1025 } else if (bitSize <= k64BitSize) {
1026 mOp = x64::MOP_leaq_m_r;
1027 } else {
1028 CHECK_FATAL(false, "NIY");
1029 }
1030 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
1031 addrInsn.AddOpndChain(memOpnd).AddOpndChain(resOpnd);
1032 cgFunc->GetCurBB()->AppendInsn(addrInsn);
1033 return &resOpnd;
1034 }
1035
1036 /*
1037 * unordered: ZF, PF, CF ==> 1,1,1
1038 * above:     ZF, PF, CF ==> 0,0,0
1039 * below:     ZF, PF, CF ==> 0,0,1
1040 * equal:     ZF, PF, CF ==> 1,0,0
1041 *
1042 * "Less than" would only check whether CF = 1, which cannot be distinguished from the unordered result (CF = 1 too).
1043 * So for floats, lt/le are expressed as gt/ge with the operands swapped.
1044 */
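/*
 * A few rows of the mapping below, as an illustration: brtrue(eq) -> je and brfalse(eq) -> jne;
 * brtrue(lt) -> jl for signed integers, jb for unsigned integers, and ja for floats (with the
 * operands swapped as described above).
 */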
1045 static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned)
1046 {
1047 switch (cmpOp) {
1048 case OP_ne:
1049 return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l;
1050 case OP_eq:
1051 return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l;
1052 case OP_lt:
1053 return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jl_l : MOP_jb_l))
1054 : (isSigned ? MOP_jge_l : MOP_jae_l);
1055 case OP_le:
1056 return (brOp == OP_brtrue) ? (isFloat ? MOP_jae_l : (isSigned ? MOP_jle_l : MOP_jbe_l))
1057 : (isSigned ? MOP_jg_l : MOP_ja_l);
1058 case OP_gt:
1059 return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jg_l : MOP_ja_l))
1060 : (isSigned ? MOP_jle_l : MOP_jbe_l);
1061 case OP_ge:
1062 return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) : (isSigned ? MOP_jl_l : MOP_jb_l);
1063 default:
1064 CHECK_FATAL(false, "PickJmpInsn error");
1065 }
1066 }
1067
1068 /*
1069 * handle the brfalse/brtrue op; opnd0 can be a compare node or a
1070 * non-compare node such as a dread
1071 */
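/*
 * e.g. (sketch): "brtrue @lab (dread i32 $x)" carries a non-compare condition, so a compare with
 * the immediate 0 is emitted first and the branch becomes jne; "brtrue (lt ...)" picks its jump
 * mop directly via PickJmpInsn.
 */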
1072 void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0)
1073 {
1074 Opcode opcode = stmt.GetOpCode();
1075 X64MOP_t jmpOperator = x64::MOP_begin;
1076 if (opnd0.IsImmediate()) {
1077 DEBUG_ASSERT(opnd0.IsIntImmediate(), "only support int immediate");
1078 DEBUG_ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode");
1079 ImmOperand &immOpnd0 = static_cast<ImmOperand &>(opnd0);
1080 if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) ||
1081 (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) {
1082 return;
1083 }
1084 jmpOperator = x64::MOP_jmpq_l;
1085 cgFunc->SetCurBBKind(BB::kBBGoto);
1086 } else {
1087 PrimType primType;
1088 Opcode condOpcode = condNode.GetOpCode();
1089 // op_ne
1090 if (!kOpcodeInfo.IsCompare(condOpcode)) {
1091 primType = condNode.GetPrimType();
1092 ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0);
1093 SelectCmp(opnd0, imm0, primType);
1094 condOpcode = OP_ne;
1095 } else {
1096 primType = static_cast<CompareNode &>(condNode).GetOpndType();
1097 }
1098 bool isFloat = IsPrimitiveFloat(primType);
1099 jmpOperator = PickJmpInsn(opcode, condOpcode, isFloat, IsSignedInteger(primType));
1100 cgFunc->SetCurBBKind(BB::kBBIf);
1101 }
1102 /* gen targetOpnd, .L.xxx__xx */
1103 auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
1104 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
1105 /* select jump Insn */
1106 Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator]));
1107 jmpInsn.AddOpndChain(targetOpnd);
1108 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
1109 }
1110
1111 Operand *X64MPIsel::SelectStrLiteral(ConststrNode &constStr)
1112 {
1113 std::string labelStr;
1114 labelStr.append(".LUstr_");
1115 labelStr.append(std::to_string(constStr.GetStrIdx()));
1116 MIRSymbol *labelSym =
1117 GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(labelStr));
1118 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
1119 auto *c = cgFunc->GetMemoryPool()->New<MIRStrConst>(constStr.GetStrIdx(), *etype);
1120 if (labelSym == nullptr) {
1121 labelSym = cgFunc->GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c->GetType());
1122 labelSym->SetStorageClass(kScFstatic);
1123 labelSym->SetSKind(kStConst);
1124 /* c may be local, we need a global node here */
1125 labelSym->SetKonst(cgFunc->NewMirConst(*c));
1126 }
1127 if (c->GetPrimType() == PTY_ptr) {
1128 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0);
1129 RegOperand &addrOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, cgFunc->GetRegTyFromPrimTy(PTY_a64));
1130 Insn &addrOfInsn = (cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]));
1131 addrOfInsn.AddOpndChain(stOpnd).AddOpndChain(addrOpnd);
1132 cgFunc->GetCurBB()->AppendInsn(addrOfInsn);
1133 return &addrOpnd;
1134 }
1135 CHECK_FATAL(false, "Unsupported const string type");
1136 return nullptr;
1137 }
1138
1139 Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg)
1140 {
1141 uint32 bitSize = GetPrimTypeBitSize(primType);
1142 regno_t retReg = 0;
1143 switch (sReg) {
1144 case kSregRetval0:
1145 retReg = IsPrimitiveFloat(primType) ? x64::V0 : x64::RAX;
1146 break;
1147 case kSregRetval1:
1148 retReg = x64::RDX;
1149 break;
1150 default:
1151 CHECK_FATAL(false, "GetTargetRetOperand: NIY");
1152 break;
1153 }
1154 RegOperand &parmRegOpnd =
1155 cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1156 return parmRegOpnd;
1157 }
1158
1159 Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1160 {
1161 PrimType dtype = node.GetPrimType();
1162 RegOperand *resOpnd = nullptr;
1163 if (!IsPrimitiveVector(dtype)) {
1164 resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
1165 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
1166 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType());
1167 SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype);
1168 } else {
1169 /* vector operand */
1170 CHECK_FATAL(false, "NIY");
1171 }
1172
1173 return resOpnd;
1174 }
1175
1176 void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1177 {
1178 uint32 bitSize = GetPrimTypeBitSize(primType);
1179 SelectCopy(resOpnd, opnd0, primType);
1180 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType);
1181 if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
1182 X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_imulq_r_r
1183 : (bitSize == k32BitSize) ? x64::MOP_imull_r_r
1184 : (bitSize == k16BitSize) ? x64::MOP_imulw_r_r
1185 : x64::MOP_begin;
1186 CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
1187 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1188 insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
1189 cgFunc->GetCurBB()->AppendInsn(insn);
1190 } else if (IsPrimitiveFloat(primType)) {
1191 X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_mulfd_r_r
1192 : (bitSize == k32BitSize) ? x64::MOP_mulfs_r_r
1193 : x64::MOP_begin;
1194 CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
1195 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1196 insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
1197 cgFunc->GetCurBB()->AppendInsn(insn);
1198 }
1199 }
1200
1201 /*
1202 * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX) Remainder(EDX)
1203 * The IDIV instruction performs signed division of EDX:EAX by the contents of a 32-bit register or memory
1204 * location and stores the quotient in EAX and the remainder in EDX.
1205 * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the sign
1206 * of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor.
1207 * An overflow generates a #DE (divide error) exception, rather than setting the OF flag.
1208 * To avoid overflow problems, precede this instruction with a CDQ instruction to sign-extend the dividend.
1209 * CDQ sign-extends EAX into EDX:EAX, which avoids overflow problems in signed number arithmetic.
1210 */
Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType primType = node.GetPrimType();
    Operand *resOpnd = nullptr;
    if (!IsPrimitiveVector(primType)) {
        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
        RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
        resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
    } else {
        /* vector operand */
        CHECK_FATAL(false, "NIY");
    }
    return resOpnd;
}

Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType primType = node.GetPrimType();
    Operand *resOpnd = nullptr;
    if (!IsPrimitiveVector(primType)) {
        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
        RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
        resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
    } else {
        /* vector operand */
        CHECK_FATAL(false, "NIY");
    }
    return resOpnd;
}

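/*
 * Shared lowering for OP_div and OP_rem: the dividend is copied into RAX, RDX is prepared by
 * sign-extension (cwd/cdq/cqo) for signed types or zeroed for unsigned types, then idiv/div is
 * issued. The quotient is read back from RAX and the remainder from RDX into a fresh virtual register.
 */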
Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode)
{
    DEBUG_ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode");
    if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
        uint32 bitSize = GetPrimTypeBitSize(primType);
        /* copy dividend to eax */
        RegOperand &raxOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(raxOpnd, opnd0, primType);

        RegOperand &rdxOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        bool isSigned = IsSignedInteger(primType);
        if (isSigned) {
            /* cdq: edx:eax = sign-extend of eax */
            X64MOP_t cvtMOp = (bitSize == k64BitSize) ? x64::MOP_cqo
                              : (bitSize == k32BitSize) ? x64::MOP_cdq
                              : (bitSize == k16BitSize) ? x64::MOP_cwd
                              : x64::MOP_begin;
            CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping");
            Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd);
            cgFunc->GetCurBB()->AppendInsn(cvtInsn);
        } else {
            /* set edx = 0 */
            SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType);
        }
        /* div */
        X64MOP_t divMOp = (bitSize == k64BitSize) ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r)
                          : (bitSize == k32BitSize) ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r)
                          : (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r)
                          : x64::MOP_begin;
        CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping");
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
        /* return */
        RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType);
        return &resOpnd;
    } else if (IsPrimitiveFloat(primType)) {
        uint32 bitSize = GetPrimTypeBitSize(primType);
        auto &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(resOpnd, opnd0, primType);
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_divsd_r, opnd1, resOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
        return &resOpnd;
    } else {
        CHECK_FATAL(false, "NIY");
    }
}

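/*
 * Logical not: lnot(x) is lowered as (x == 0). The float path reuses the cmpeqsd-based equality,
 * while the integer path compares against 0 and materializes the flag via SelectCmpResult.
 */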
Operand *X64MPIsel::SelectLnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    RegOperand *resOpnd = nullptr;
    if (!IsPrimitiveVector(dtype)) {
        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
        ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(dtype), 0);
        if (IsPrimitiveFloat(dtype)) {
            SelectCmpFloatEq(*resOpnd, regOpnd0, immOpnd, dtype, dtype);
        } else {
            SelectCmp(regOpnd0, immOpnd, dtype);
            SelectCmpResult(*resOpnd, OP_eq, dtype, dtype);
        }
    } else {
        /* vector operand */
        CHECK_FATAL(false, "NIY");
    }
    return resOpnd;
}

/*
 * ucomisd sets the flags as follows:
 *   unordered: ZF, PF, CF ==> 1, 1, 1
 *   above:     ZF, PF, CF ==> 0, 0, 0
 *   below:     ZF, PF, CF ==> 0, 0, 1
 *   equal:     ZF, PF, CF ==> 1, 0, 0
 *
 * A plain "below" check (CF == 1) cannot be distinguished from the unordered case, which also sets CF.
 * Therefore float lt/le is lowered by checking gt/ge with the operands swapped.
 *
 * Float eq uses cmpeqsd, the same approach as LLVM.
 */
Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    PrimType primOpndType = node.GetOpndType();
    RegOperand *resOpnd = nullptr;
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType());
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType());
    if (!IsPrimitiveVector(node.GetPrimType())) {
        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
        auto nodeOp = node.GetOpCode();
        Opcode parentOp = parent.GetOpCode();
        bool isFloat = IsPrimitiveFloat(primOpndType);
        bool isJump = (parentOp == OP_brfalse || parentOp == OP_brtrue || parentOp == OP_select);
        // float eq
        if (isFloat && (nodeOp == maple::OP_eq) && (!isJump)) {
            SelectCmpFloatEq(*resOpnd, regOpnd0, regOpnd1, dtype, primOpndType);
            return resOpnd;
        }

        bool isSwap = (isFloat && (nodeOp == maple::OP_le || nodeOp == maple::OP_lt) && (parentOp != OP_brfalse));
        SelectCmp(regOpnd0, regOpnd1, primOpndType, isSwap);
        if (isJump) {
            return resOpnd;
        }
        SelectCmpResult(*resOpnd, nodeOp, dtype, primOpndType);
    } else {
        /* vector operand */
        CHECK_FATAL(false, "NIY");
    }
    return resOpnd;
}

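/*
 * Emit the flag-setting compare (cmp for integers, ucomisd for floats). By default the operands are
 * appended as (opnd1, opnd0); isSwap reverses that order, which implements the float lt/le rewrite
 * described in the comment above.
 */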
void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType, bool isSwap)
{
    x64::X64MOP_t cmpMOp = x64::MOP_begin;
    if (IsPrimitiveInteger(primType)) {
        cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType);
    } else if (IsPrimitiveFloat(primType)) {
        cmpMOp = x64::MOP_ucomisd_r_r;
    } else {
        CHECK_FATAL(false, "NIY");
    }
    DEBUG_ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp");
    Insn &cmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp]));
    if (isSwap) {
        cmpInsn.AddOpndChain(opnd0).AddOpndChain(opnd1);
    } else {
        cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0);
    }
    cgFunc->GetCurBB()->AppendInsn(cmpInsn);
}

void X64MPIsel::SelectCmpFloatEq(RegOperand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primResType,
                                 PrimType primOpndType)
{
    /* float eq uses cmpeqsd, the same approach as LLVM */
    x64::X64MOP_t eqMOp = x64::MOP_cmpeqsd_r_r;
    Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(eqMOp, X64CG::kMd[eqMOp]);

    auto &regOpnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primOpndType),
                                                          cgFunc->GetRegTyFromPrimTy(primOpndType));
    SelectCopy(regOpnd1, opnd1, primOpndType);
    /* CMPEQSD xmm1, xmm2 => CMPSD xmm1, xmm2, 0 */
    setInsn.AddOpndChain(opnd0).AddOpndChain(regOpnd1);
    cgFunc->GetCurBB()->AppendInsn(setInsn);

    /* set result -> u64/u32 */
    auto tmpResType = (primOpndType == maple::PTY_f64) ? PTY_u64 : PTY_u32;
    RegOperand &tmpResOpnd =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(tmpResType), cgFunc->GetRegTyFromPrimTy(tmpResType));
    SelectRetypeFloat(tmpResOpnd, regOpnd1, tmpResType, primOpndType);
    /* cvt u64/u32 -> primType */
    SelectIntCvt(resOpnd, tmpResOpnd, primResType, tmpResType);
}

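/*
 * Materialize the current EFLAGS into a boolean value: setcc produces an 8-bit result, which is then
 * widened to the requested primType via SelectIntCvt.
 */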
void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType)
{
    bool isFloat = IsPrimitiveFloat(primOpndType);
    bool isSigned = (!IsPrimitiveUnsigned(primOpndType) && !IsPrimitiveFloat(primOpndType));
    /* set result -> u8 */
    RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k8BitSize, cgFunc->GetRegTyFromPrimTy(PTY_u8));
    x64::X64MOP_t setMOp = GetSetCCMop(opCode, tmpResOpnd.GetKind(), isSigned, isFloat);
    DEBUG_ASSERT(setMOp != x64::MOP_begin, "unsupported mOp");
    Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(setMOp, X64CG::kMd[setMOp]);
    setInsn.AddOpndChain(tmpResOpnd);
    cgFunc->GetCurBB()->AppendInsn(setInsn);
    /* cvt u8 -> primType */
    SelectIntCvt(resOpnd, tmpResOpnd, primType, PTY_u8);
}

Operand *X64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd,
                                 const BaseNode &parent)
{
    PrimType dtype = expr.GetPrimType();
    RegOperand &resOpnd =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
    RegOperand &trueRegOpnd = SelectCopy2Reg(trueOpnd, dtype, expr.Opnd(1)->GetPrimType());
    RegOperand &falseRegOpnd = SelectCopy2Reg(falseOpnd, dtype, expr.Opnd(2)->GetPrimType());
    Opcode cmpOpcode;
    PrimType cmpPrimType;
    if (kOpcodeInfo.IsCompare(expr.Opnd(0)->GetOpCode())) {
        CompareNode *cmpNode = static_cast<CompareNode *>(expr.Opnd(0));
        DEBUG_ASSERT(cmpNode != nullptr, "null ptr check");
        cmpOpcode = cmpNode->GetOpCode();
        cmpPrimType = cmpNode->GetOpndType();
    } else {
        cmpPrimType = expr.Opnd(0)->GetPrimType();
        cmpOpcode = OP_ne;
        ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(cmpPrimType), 0);
        SelectCmp(cond, immOpnd, cmpPrimType);
    }
    SelectSelect(resOpnd, trueRegOpnd, falseRegOpnd, dtype, cmpOpcode, cmpPrimType);
    return &resOpnd;
}

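/*
 * Lower select via cmovcc: the result is initialized with the false value and conditionally
 * overwritten with the true value. cmov has no 8-bit form, so 8-bit selects are widened to
 * 32 bits first and the result is narrowed back afterwards.
 */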
void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
                             Opcode cmpOpcode, PrimType cmpPrimType)
{
    CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY");
    bool isSigned = !IsPrimitiveUnsigned(primType);
    uint32 bitSize = GetPrimTypeBitSize(primType);
    if (bitSize == k8BitSize) {
        /* cmov does not support 8-bit operands, so convert to 32-bit */
        PrimType cvtType = isSigned ? PTY_i32 : PTY_u32;
        RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt);
        Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType);
        Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType);
        SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType);
        SelectCopy(resOpnd, tmpResOpnd, primType, cvtType);
        return;
    }
    RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType);
    SelectCopy(resOpnd, falseOpnd, primType);
    x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType));
    DEBUG_ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp");
    Insn &comvInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]);
    comvInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd);
    cgFunc->GetCurBB()->AppendInsn(comvInsn);
}

void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    if (IsPrimitiveInteger(primType)) {
        SelectCmp(opnd0, opnd1, primType);
        Opcode cmpOpcode = isMin ? OP_lt : OP_gt;
        SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType);
    } else {
        // float lt/le would need to swap the operands and use seta
        CHECK_FATAL(false, "NIY type max or min");
    }
}

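/*
 * The exp intrinsic is lowered to a call to the C library function exp(); the argument and the
 * return value both use the intrinsic's primType.
 */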
Operand *X64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    PrimType primType = node.GetPrimType();
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType);
    Operand &retReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType));
    std::vector<Operand *> opndVec = {&regOpnd0};
    SelectLibCall("exp", opndVec, primType, &retReg, primType);
    return &retReg;
}

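/*
 * Count trailing zeros: bsf returns the index of the lowest set bit, which equals ctz for a
 * non-zero input (the zero case is not special-cased here). The result is then converted to the
 * intrinsic's return type.
 */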
Operand *X64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
    PrimType origPrimType = node.Opnd(0)->GetPrimType();
    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);

    bool is64BitCtz = node.GetIntrinsic() == INTRN_C_ctz64;
    MOperator mopBsf = is64BitCtz ? x64::MOP_bsfq_r_r : x64::MOP_bsfl_r_r;
    Insn &bsfInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsf, X64CG::kMd[mopBsf]);
    bsfInsn.AddOpndChain(opnd).AddOpndChain(opnd);
    cgFunc->GetCurBB()->AppendInsn(bsfInsn);

    PrimType retType = node.GetPrimType();
    RegOperand &destReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
    // ctz i32 (u32) => cvt u32 -> i32
    // ctz i32 (u64) => cvt u64 -> i32
    SelectIntCvt(destReg, opnd, retType, origPrimType);
    return &destReg;
}

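/*
 * Count leading zeros via bsr: clz32(x) = 31 - bsr(x). bsr sets ZF when the source is zero, so the
 * cmove loads -1 in that case, yielding 31 - (-1) = 32. The subtraction is emitted as neg + add.
 */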
Operand *X64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
    CHECK_FATAL(node.GetIntrinsic() == INTRN_C_clz32, "only support clz32");
    PrimType origPrimType = node.Opnd(0)->GetPrimType();
    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
    // bsr opnd tmp2
    ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType), -1);
    RegOperand &tmp1 = SelectCopy2Reg(imm, origPrimType);
    RegOperand &tmp2 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(origPrimType),
                                                            cgFunc->GetRegTyFromPrimTy(origPrimType));
    MOperator mopBsr = x64::MOP_bsrl_r_r;
    Insn &bsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsr, X64CG::kMd[mopBsr]);
    bsrInsn.AddOpndChain(opnd).AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(bsrInsn);
    // cmove -1, tmp2
    MOperator mopComv = x64::MOP_cmovel_r_r;
    Insn &cmovInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopComv, X64CG::kMd[mopComv]);
    cmovInsn.AddOpndChain(tmp1).AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(cmovInsn);
    // neg tmp2
    MOperator mopNeg = x64::MOP_negl_r;
    Insn &negInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopNeg, X64CG::kMd[mopNeg]);
    negInsn.AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(negInsn);
    // add: tmp3 = 31 + tmp2
    ImmOperand &imm2 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType), k32BitSize - 1);
    RegOperand &tmp3 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(origPrimType),
                                                            cgFunc->GetRegTyFromPrimTy(origPrimType));
    SelectAdd(tmp3, imm2, tmp2, origPrimType);
    PrimType retType = node.GetPrimType();
    RegOperand &destReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
    SelectIntCvt(destReg, tmp3, retType, origPrimType);
    return &destReg;
}

Operand *X64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    auto bitWidth = GetPrimTypeBitSize(dtype);
    // bswap only supports 32/64-bit operands; the 16-bit case is handled with xchg (xchg al, ah)
    CHECK_FATAL(bitWidth == k16BitSize || bitWidth == k32BitSize || bitWidth == k64BitSize,
                "NIY, unsupported bitWidth.");

    RegOperand *resOpnd = nullptr;

    if (bitWidth == k16BitSize) {
        /*
         * For 16-bit, use xchg, such as: xchg ah, al. The register therefore must have a high 8-bit part.
         * For x64, we can use RAX(AH:AL), RBX(BH:BL), RCX(CH:CL), RDX(DH:DL).
         * The RA does not perform special processing for the high 8-bit case,
         * so the RAX register is used here.
         */
        resOpnd = &cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
        RegOperand &lowerOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
        RegOperand &highOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
        highOpnd.SetHigh8Bit();
        x64::X64MOP_t xchgMop = MOP_xchgb_r_r;
        Insn &xchgInsn = cgFunc->GetInsnBuilder()->BuildInsn(xchgMop, X64CG::kMd[xchgMop]);
        xchgInsn.AddOpndChain(highOpnd).AddOpndChain(lowerOpnd);
        cgFunc->GetCurBB()->AppendInsn(xchgInsn);
    } else {
        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
        x64::X64MOP_t bswapMop = (bitWidth == k64BitSize) ? MOP_bswapq_r : MOP_bswapl_r;
        Insn &bswapInsn = cgFunc->GetInsnBuilder()->BuildInsn(bswapMop, X64CG::kMd[bswapMop]);
        bswapInsn.AddOperand(*resOpnd);
        cgFunc->GetCurBB()->AppendInsn(bswapInsn);
    }
    return resOpnd;
}

RegOperand &X64MPIsel::GetTargetStackPointer(PrimType primType)
{
    return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, GetPrimTypeBitSize(primType),
                                                cgFunc->GetRegTyFromPrimTy(primType));
}

RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType)
{
    return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType),
                                                cgFunc->GetRegTyFromPrimTy(primType));
}

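/*
 * Retype between an integer register and a floating-point register of the same width: the bit
 * pattern is moved unchanged with movd (32-bit) or movq (64-bit); no value conversion is performed.
 */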
void X64MPIsel::SelectRetypeFloat(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType)
{
    uint32 fromSize = GetPrimTypeBitSize(fromType);
    [[maybe_unused]] uint32 toSize = GetPrimTypeBitSize(toType);
    DEBUG_ASSERT(fromSize == toSize, "retype bit widths don't match");
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
    MOperator mOp = x64::MOP_begin;
    if (fromSize == k32BitSize) {
        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movd_fr_r : x64::MOP_movd_r_fr;
    } else if (fromSize == k64BitSize) {
        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movq_fr_r : x64::MOP_movq_r_fr;
    } else {
        CHECK_FATAL(false, "niy");
    }
    CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    (void)insn.AddOpndChain(regOpnd0).AddOpndChain(resOpnd);
    cgFunc->GetCurBB()->AppendInsn(insn);
    return;
}

Operand *X64MPIsel::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    if (!IsPrimitiveFloat(dtype)) {
        DEBUG_ASSERT(false, "should be float type");
        return nullptr;
    }
    auto bitSize = GetPrimTypeBitSize(dtype);
    MOperator mOp = x64::MOP_begin;
    if (bitSize == k64BitSize) {
        mOp = MOP_sqrtd_r_r;
    } else if (bitSize == k32BitSize) {
        mOp = MOP_sqrts_r_r;
    } else {
        CHECK_FATAL(false, "niy");
    }
    RegOperand &regOpnd0 = SelectCopy2Reg(src, dtype);
    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    Operand &retReg = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(dtype));

    (void)insn.AddOpndChain(regOpnd0).AddOpndChain(retReg);
    cgFunc->GetCurBB()->AppendInsn(insn);
    return &retReg;
}

void X64MPIsel::SelectAsm(AsmNode &node)
{
    cgFunc->SetHasAsm();
    CHECK_FATAL(false, "NIY");
}
}  // namespace maplebe