1 /*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "x64_MPISel.h"
17 #include "x64_memlayout.h"
18 #include "x64_cgfunc.h"
19 #include "x64_isa_tbl.h"
20 #include "x64_cg.h"
21 #include "isel.h"
22
23 namespace maplebe {
24 /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */
25 MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const
26 {
27 PrimType symType;
28 int32 fieldOffset = 0;
29 if (fieldId == 0) {
30 symType = symbol.GetType()->GetPrimType();
31 } else {
32 MIRType *mirType = symbol.GetType();
33 DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure");
34 MIRStructType *structType = static_cast<MIRStructType *>(mirType);
35 symType = structType->GetFieldType(fieldId)->GetPrimType();
36 fieldOffset = static_cast<uint32>(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first);
37 }
38 uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType);
39 return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset);
40 }
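/* Build a memory operand for a symbol: locals and formals are addressed off the frame base register
 * chosen by memlayout, while globals, externs and statics are addressed RIP-relative. */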
41 MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const
42 {
43 MIRStorageClass storageClass = symbol.GetStorageClass();
44 MemOperand *result = nullptr;
45 RegOperand *stackBaseReg = nullptr;
46 if ((storageClass == kScAuto) || (storageClass == kScFormal)) {
47 auto *symloc = static_cast<X64SymbolAlloc *>(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex()));
48 DEBUG_ASSERT(symloc != nullptr, "sym loc should have been defined");
49 stackBaseReg = static_cast<X64CGFunc *>(cgFunc)->GetBaseReg(*symloc);
50 int stOfst = cgFunc->GetBaseOffset(*symloc);
51 /* Create field symbols in aggregate structure */
52 result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
53 result->SetBaseRegister(*stackBaseReg);
54 result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm(k64BitSize, stOfst + offset));
55 CHECK_FATAL(result != nullptr, "NIY");
56 return *result;
57 }
58 if ((storageClass == kScGlobal) || (storageClass == kScExtern) || (storageClass == kScPstatic) ||
59 (storageClass == kScFstatic)) {
60 stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt);
61 result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
62 ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0);
63 result->SetBaseRegister(*stackBaseReg);
64 result->SetOffsetOperand(stOfstOpnd);
65 CHECK_FATAL(result != nullptr, "NIY");
66 return *result;
67 }
68 CHECK_FATAL(false, "NIY");
69 return *result;
70 }
71
72 void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd)
73 {
74 MIRType *retType = cgFunc->GetFunction().GetReturnType();
75 X64CallConvImpl retLocator(cgFunc->GetBecommon());
76 CCLocInfo retMech;
77 retLocator.LocateRetVal(*retType, retMech);
78 if (retMech.GetRegCount() == 0) {
79 return;
80 }
81 std::vector<RegOperand *> retRegs;
82 if (!cgFunc->GetFunction().StructReturnedInRegs() || retNode.Opnd(0)->GetOpCode() == OP_constval) {
83 PrimType oriPrimType = retMech.GetPrimTypeOfReg0();
84 regno_t retReg = retMech.GetReg0();
85 DEBUG_ASSERT(retReg != kRinvalid, "NIY");
86 RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, GetPrimTypeBitSize(oriPrimType),
87 cgFunc->GetRegTyFromPrimTy(oriPrimType));
88 retRegs.push_back(&retOpnd);
89 SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType());
90 } else {
91 CHECK_FATAL(opnd.IsMemoryAccessOperand(), "NIY");
92 MemOperand &memOpnd = static_cast<MemOperand &>(opnd);
93 ImmOperand *offsetOpnd = memOpnd.GetOffsetOperand();
94 RegOperand *baseOpnd = memOpnd.GetBaseRegister();
95
96 PrimType oriPrimType0 = retMech.GetPrimTypeOfReg0();
97 regno_t retReg0 = retMech.GetReg0();
98 DEBUG_ASSERT(retReg0 != kRinvalid, "NIY");
99 RegOperand &retOpnd0 = cgFunc->GetOpndBuilder()->CreatePReg(retReg0, GetPrimTypeBitSize(oriPrimType0),
100 cgFunc->GetRegTyFromPrimTy(oriPrimType0));
101 MemOperand &rhsMemOpnd0 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType0));
102 rhsMemOpnd0.SetBaseRegister(*baseOpnd);
103 rhsMemOpnd0.SetOffsetOperand(*offsetOpnd);
104 retRegs.push_back(&retOpnd0);
105 SelectCopy(retOpnd0, rhsMemOpnd0, oriPrimType0);
106
107 regno_t retReg1 = retMech.GetReg1();
108 if (retReg1 != kRinvalid) {
109 PrimType oriPrimType1 = retMech.GetPrimTypeOfReg1();
110 RegOperand &retOpnd1 = cgFunc->GetOpndBuilder()->CreatePReg(retReg1, GetPrimTypeBitSize(oriPrimType1),
111 cgFunc->GetRegTyFromPrimTy(oriPrimType1));
112 MemOperand &rhsMemOpnd1 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType1));
113 ImmOperand &newOffsetOpnd = static_cast<ImmOperand &>(*offsetOpnd->Clone(*cgFunc->GetMemoryPool()));
114 newOffsetOpnd.SetValue(newOffsetOpnd.GetValue() + GetPrimTypeSize(oriPrimType0));
115 rhsMemOpnd1.SetBaseRegister(*baseOpnd);
116 rhsMemOpnd1.SetOffsetOperand(newOffsetOpnd);
117 retRegs.push_back(&retOpnd1);
118 SelectCopy(retOpnd1, rhsMemOpnd1, oriPrimType1);
119 }
120 }
121 /* for optimization: insert a pseudo ret for each return register so that rax/rdx are not removed as dead */
122 SelectPseduoForReturn(retRegs);
123 }
124
125 void X64MPIsel::SelectPseduoForReturn(std::vector<RegOperand *> &retRegs)
126 {
127 for (auto retReg : retRegs) {
128 MOperator mop = x64::MOP_pseudo_ret_int;
129 Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]);
130 cgFunc->GetCurBB()->AppendInsn(pInsn);
131 pInsn.AddOpndChain(*retReg);
132 }
133 }
134
135 void X64MPIsel::SelectReturn()
136 {
137 /* jump to epilogue */
138 MOperator mOp = x64::MOP_jmpq_l;
139 LabelNode *endLabel = cgFunc->GetEndLabel();
140 auto endLabelName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx());
141 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx());
142 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
143 jmpInsn.AddOpndChain(targetOpnd);
144 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
145 cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB());
146 }
147
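/* Copy an aggregate argument into the outgoing argument area, 8 bytes at a time, starting at RSP + baseOffset. */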
148 void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset)
149 {
150 int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize();
151 for (int32 i = 0; i < copyTime; ++i) {
152 MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
153 addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister());
154 ImmOperand &newImmOpnd =
155 static_cast<ImmOperand &>(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool()));
156 newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize());
157 addrMemOpnd.SetOffsetOperand(newImmOpnd);
158 RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt);
159 Operand &stMemOpnd =
160 cgFunc->GetOpndBuilder()->CreateMem(spOpnd, (baseOffset + i * GetPointerSize()), k64BitSize);
161 SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64);
162 }
163 }
164
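/* Record one 8-byte chunk of an aggregate argument to be passed in the given register; the actual
 * register copies are emitted at the end of SelectParmList. */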
165 void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum)
166 {
167 CHECK_FATAL(parmNum < kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing");
168 RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt);
169 MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
170 addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister());
171 ImmOperand &newImmOpnd = static_cast<ImmOperand &>(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool()));
172 newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize());
173 addrMemOpnd.SetOffsetOperand(newImmOpnd);
174 paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64});
175 }
176
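/* For an aggregate argument expression (dread or iread), return its memory operand, its size in bytes and its MIR type. */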
177 std::tuple<Operand *, size_t, MIRType *> X64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr)
178 {
179 /* get mirType info */
180 auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr);
181 MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType);
182 /* get symbol memOpnd info */
183 MemOperand *symMemOpnd = nullptr;
184 if (argExpr.GetOpCode() == OP_dread) {
185 AddrofNode &dread = static_cast<AddrofNode &>(argExpr);
186 MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx());
187 symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID());
188 } else if (argExpr.GetOpCode() == OP_iread) {
189 IreadNode &iread = static_cast<IreadNode &>(argExpr);
190 symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset);
191 } else {
192 CHECK_FATAL(false, "unsupported opcode");
193 }
194 return {symMemOpnd, symInfo.size, mirType};
195 }
196
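/* Pass an aggregate argument according to the calling convention: in up to four registers when it fits,
 * otherwise in the outgoing stack area. */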
197 void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused)
198 {
199 auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr);
200 DEBUG_ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd");
201 MemOperand &memOpnd = static_cast<MemOperand &>(*argOpnd);
202
203 CCLocInfo ploc;
204 parmLocator.LocateNextParm(*mirType, ploc);
205 if (isArgUnused) {
206 return;
207 }
208
209 /* create call struct param pass */
210 if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) {
211 CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset);
212 } else {
213 CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state");
214 CreateCallStructParamPassByReg(memOpnd, ploc.reg0, kFirstReg);
215 if (ploc.reg1 != kRinvalid) {
216 CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondOpnd);
217 }
218 if (ploc.reg2 != kRinvalid) {
219 CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdOpnd);
220 }
221 if (ploc.reg3 != kRinvalid) {
222 CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthOpnd);
223 }
224 }
225 }
226
227 /*
228  * SelectParmList generates an instruction for each parameter to load the
229  * parameter value into the corresponding register.
230  * We return a list of registers to the call instruction because
231  * they may be needed in the register allocation phase.
232  * fpNum is an output parameter holding the number of vector (floating-point)
233  * registers used for argument passing.
234  */
235 void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum)
236 {
237 paramPassByReg.clear();
238 fpNum = 0;
239 /* for IcallNode, the 0th operand is the function pointer */
240 size_t argBegin = 0;
241 if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
242 ++argBegin;
243 }
244
245 MIRFunction *callee = nullptr;
246 if (naryNode.GetOpCode() == OP_call) {
247 PUIdx calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
248 callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
249 }
250 X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(naryNode));
251 CCLocInfo ploc;
252 for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) {
253 BaseNode *argExpr = naryNode.Opnd(i);
254 DEBUG_ASSERT(argExpr != nullptr, "not null check");
255 PrimType primType = argExpr->GetPrimType();
256 DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
257 bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i));
258 if (primType == PTY_agg) {
259 SelectParmListForAggregate(*argExpr, parmLocator, isArgUnused);
260 continue;
261 }
262
263 Operand *argOpnd = HandleExpr(naryNode, *argExpr);
264 DEBUG_ASSERT(argOpnd != nullptr, "not null check");
265 MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
266 parmLocator.LocateNextParm(*mirType, ploc);
267
268 /* skip unused args */
269 if (isArgUnused) {
270 continue;
271 }
272
273 if (ploc.reg0 != x64::kRinvalid) {
274 /* load to the register. */
275 RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, GetPrimTypeBitSize(primType),
276 cgFunc->GetRegTyFromPrimTy(primType));
277 paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType});
278 if (x64::IsFPSIMDRegister(static_cast<X64reg>(ploc.reg0))) {
279 fpNum++;
280 }
281 } else {
282 /* load to stack memory */
283 RegOperand &baseOpnd =
284 cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
285 MemOperand &actMemOpnd =
286 cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, GetPrimTypeBitSize(primType));
287 SelectCopy(actMemOpnd, *argOpnd, primType);
288 }
289 DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NIY");
290 }
291
292 /* param pass by reg */
293 for (auto [regOpnd, argOpnd, primType] : paramPassByReg) {
294 DEBUG_ASSERT(regOpnd != nullptr, "not null check");
295 DEBUG_ASSERT(argOpnd != nullptr, "not null check");
296 SelectCopy(*regOpnd, *argOpnd, primType);
297 srcOpnds.PushOpnd(*regOpnd);
298 }
299 }
300
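/* Map the special registers kSregFp/kSregSp to the physical frame pointer and stack pointer. */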
301 RegOperand &X64MPIsel::SelectSpecialRegread(PregIdx pregIdx, PrimType primType)
302 {
303 switch (-pregIdx) {
304 case kSregFp: {
305 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RFP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
306 }
307 case kSregSp: {
308 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
309 }
310 default: {
311 CHECK_FATAL(false, "ERROR: Not supported special register!");
312 }
313 }
314 }
315
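/* Aggregate formals larger than 16 bytes are passed in memory rather than in registers. */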
316 bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol)
317 {
318 if (symbol.GetStorageClass() == kScFormal &&
319 cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) {
320 return true;
321 }
322 return false;
323 }
324
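/* Store an aggregate return value (at most 16 bytes) from the return registers rax/rdx into the symbol's
 * memory, using a narrower store for a trailing chunk smaller than 8 bytes. */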
325 void X64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize)
326 {
327 CHECK_FATAL((aggSize > 0) && (aggSize <= k16ByteSize), "out of range.");
328 RegOperand *baseOpnd = symbolMem.GetBaseRegister();
329 int32 stOffset = symbolMem.GetOffsetOperand()->GetValue();
330 bool isCopyOneReg = (aggSize <= k8ByteSize);
331 uint32 extraSize = (aggSize % k8ByteSize) * kBitsPerByte;
332 if (extraSize == 0) {
333 extraSize = k64BitSize;
334 } else if (extraSize <= k8BitSize) {
335 extraSize = k8BitSize;
336 } else if (extraSize <= k16BitSize) {
337 extraSize = k16BitSize;
338 } else if (extraSize <= k32BitSize) {
339 extraSize = k32BitSize;
340 } else {
341 extraSize = k64BitSize;
342 }
343 /* generate moves from the return registers (rax, rdx) to the symbol's memory */
344 PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraSize);
345 /* mov %rax mem */
346 RegOperand ®Rhs0 =
347 cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, (isCopyOneReg ? extraSize : k64BitSize), kRegTyInt);
348 MemOperand &memSymbo0 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast<int32>(stOffset),
349 isCopyOneReg ? extraSize : k64BitSize);
350 SelectCopy(memSymbo0, regRhs0, isCopyOneReg ? extraTy : PTY_u64);
351 /* mov %rdx mem */
352 if (!isCopyOneReg) {
353 RegOperand ®Rhs1 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, extraSize, kRegTyInt);
354 MemOperand &memSymbo1 =
355 cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast<int32>(stOffset + k8ByteSize), extraSize);
356 SelectCopy(memSymbo1, regRhs1, extraTy);
357 }
358 return;
359 }
360
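/* Copy an aggregate between two memory locations: small copies are expanded inline 8 bytes at a time,
 * copies of 40 bytes or more fall back to a memcpy libcall. */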
361 void X64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize)
362 {
363 /* on x86-64, data is copied 8 bytes at a time */
364 uint32 copyTimes = copySize / k8ByteSize;
365 uint32 extraCopySize = copySize % k8ByteSize;
366 ImmOperand *stOfstLhs = lhs.GetOffsetOperand();
367 ImmOperand *stOfstRhs = rhs.GetOffsetOperand();
368 RegOperand *baseLhs = lhs.GetBaseRegister();
369 RegOperand *baseRhs = rhs.GetBaseRegister();
370 if (copySize < 40U) {
371 for (uint32 i = 0; i < copyTimes; ++i) {
372 /* prepare dest addr */
373 MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
374 memOpndLhs.SetBaseRegister(*baseLhs);
375 ImmOperand &newStOfstLhs = static_cast<ImmOperand &>(*stOfstLhs->Clone(*cgFunc->GetMemoryPool()));
376 newStOfstLhs.SetValue(newStOfstLhs.GetValue() + i * k8ByteSize);
377 memOpndLhs.SetOffsetOperand(newStOfstLhs);
378 /* prepare src addr */
379 MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize);
380 memOpndRhs.SetBaseRegister(*baseRhs);
381 ImmOperand &newStOfstRhs = static_cast<ImmOperand &>(*stOfstRhs->Clone(*cgFunc->GetMemoryPool()));
382 newStOfstRhs.SetValue(newStOfstRhs.GetValue() + i * k8ByteSize);
383 memOpndRhs.SetOffsetOperand(newStOfstRhs);
384 /* copy data */
385 SelectCopy(memOpndLhs, memOpndRhs, PTY_a64);
386 }
387 } else {
388 /* adopt memcpy */
389 std::vector<Operand *> opndVec;
390 opndVec.push_back(PrepareMemcpyParm(lhs, MOP_leaq_m_r));
391 opndVec.push_back(PrepareMemcpyParm(rhs, MOP_leaq_m_r));
392 opndVec.push_back(PrepareMemcpyParm(copySize));
393 SelectLibCall("memcpy", opndVec, PTY_a64, nullptr, PTY_void);
394 return;
395 }
396 /* handle the trailing bytes that are smaller than one 8-byte unit */
397 if (extraCopySize == 0) {
398 return;
399 }
400 extraCopySize = ((extraCopySize <= k4ByteSize) ? k4ByteSize : k8ByteSize) * kBitsPerByte;
401 PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraCopySize);
402 MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
403 memOpndLhs.SetBaseRegister(*baseLhs);
404 ImmOperand &newStOfstLhs = static_cast<ImmOperand &>(*stOfstLhs->Clone(*cgFunc->GetMemoryPool()));
405 newStOfstLhs.SetValue(newStOfstLhs.GetValue() + copyTimes * k8ByteSize);
406 memOpndLhs.SetOffsetOperand(newStOfstLhs);
407 MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
408 memOpndRhs.SetBaseRegister(*baseRhs);
409 ImmOperand &newStOfstRhs = static_cast<ImmOperand &>(*stOfstRhs->Clone(*cgFunc->GetMemoryPool()));
410 newStOfstRhs.SetValue(newStOfstRhs.GetValue() + copyTimes * k8ByteSize);
411 memOpndRhs.SetOffsetOperand(newStOfstRhs);
412 SelectCopy(memOpndLhs, memOpndRhs, extraTy);
413 }
414
415 void X64MPIsel::SelectLibCall(const std::string &funcName, std::vector<Operand*> &opndVec, PrimType primType,
416 Operand* retOpnd, PrimType retType)
417 {
418 /* generate libcall */
419 std::vector<PrimType> pt(opndVec.size(), primType);
420 SelectLibCallNArg(funcName, opndVec, pt, retOpnd, retType);
421 return;
422 }
423
424 void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector<Operand *> &opndVec,
425 std::vector<PrimType> pt, Operand* retOpnd, PrimType retPrimType)
426 {
427 std::string newName = funcName;
428 MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
429 st->SetNameStrIdx(newName);
430 st->SetStorageClass(kScExtern);
431 st->SetSKind(kStFunc);
432
433 /* setup the type of the callee function */
434 std::vector<TyIdx> vec;
435 std::vector<TypeAttrs> vecAt;
436 for (size_t i = 0; i < opndVec.size(); ++i) {
437 vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast<size_t>(pt[i])]->GetTypeIndex());
438 vecAt.emplace_back(TypeAttrs());
439 }
440
441 /* set up the callee's return type (only single-register returns are handled below) */
442 MIRType *mirRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast<size_t>(retPrimType));
443 st->SetTyIdx(
444 cgFunc->GetBecommon().BeGetOrCreateFunctionType(mirRetType->GetTypeIndex(), vec, vecAt)->GetTypeIndex());
445
446 /* setup actual parameters */
447 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
448
449 X64CallConvImpl parmLocator(cgFunc->GetBecommon());
450 CCLocInfo ploc;
451 for (size_t i = 0; i < opndVec.size(); ++i) {
452 DEBUG_ASSERT(pt[i] != PTY_void, "primType check");
453 MIRType *ty;
454 ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<size_t>(pt[i])];
455 Operand *stOpnd = opndVec[i];
456 DEBUG_ASSERT(stOpnd->IsRegister(), "exp result should be reg");
457 RegOperand *expRegOpnd = static_cast<RegOperand *>(stOpnd);
458 parmLocator.LocateNextParm(*ty, ploc);
459 if (ploc.reg0 != 0) { /* load to the register */
460 RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, expRegOpnd->GetSize(),
461 cgFunc->GetRegTyFromPrimTy(pt[i]));
462 SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]);
463 paramOpnds.PushOpnd(parmRegOpnd);
464 }
465 DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
466 }
467
468 MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false);
469 Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym);
470 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
471 Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
472
473 bool isFloat = IsPrimitiveFloat(retPrimType);
474 Insn::RetType insnRetType = isFloat ? Insn::kRegFloat : Insn::kRegInt;
475 callInsn.SetRetType(insnRetType);
476 /* no ret function */
477 if (retOpnd == nullptr) {
478 return;
479 }
480
481 CCLocInfo retMech;
482 parmLocator.LocateRetVal(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech);
483 if (retMech.GetRegCount() <= 0 || retMech.GetRegCount() > 1) {
484 CHECK_FATAL(false, "just support one register return");
485 }
486 if (mirRetType != nullptr) {
487 callInsn.SetRetSize(static_cast<uint32>(mirRetType->GetSize()));
488 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(mirRetType->GetPrimType()));
489 }
490 CHECK_FATAL(retOpnd->IsRegister(), "niy");
491 RegOperand *regOpnd = static_cast<RegOperand*>(retOpnd);
492 regno_t retRegNo = retMech.GetReg0();
493 if (regOpnd->GetRegisterNumber() != retRegNo) {
494 RegOperand &phyRetOpnd =
495 cgFunc->GetOpndBuilder()->CreatePReg(retRegNo, regOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(retPrimType));
496 SelectCopy(*retOpnd, phyRetOpnd, retPrimType);
497 }
498 return;
499 }
500
501 Operand *X64MPIsel::SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const
502 {
503 CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const");
504 uint32 labelIdxTmp = cgFunc->GetLabelIdx();
505 Operand *result = nullptr;
506 if (primType == PTY_f64) {
507 result = SelectLiteral(static_cast<MIRDoubleConst&>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
508 } else {
509 result = SelectLiteral(static_cast<MIRFloatConst&>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
510 }
511 cgFunc->SetLabelIdx(labelIdxTmp);
512 return result;
513 }
514
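/* Materialize the address of a memory operand into a virtual register (via lea) so it can be passed to memcpy. */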
515 RegOperand *X64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp)
516 {
517 RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
518 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
519 addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult);
520 cgFunc->GetCurBB()->AppendInsn(addrInsn);
521 return ®Result;
522 }
523
524 RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize)
525 {
526 RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
527 ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize);
528 SelectCopy(regResult, sizeOpnd, PTY_i64);
529 return ®Result;
530 }
531
532 void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs)
533 {
534 /* RHS is a function return value; it must come from a regread */
535 if (opndRhs.IsRegister()) {
536 SelectIntAggCopyReturn(symbolMem, lhsInfo.size);
537 return;
538 }
539 /* in general, the RHS comes from a dread/iread */
540 CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem");
541 MemOperand &memRhs = static_cast<MemOperand &>(opndRhs);
542 SelectAggCopy(symbolMem, memRhs, lhsInfo.size);
543 }
544
545 void X64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs)
546 {
547 /* mirSymbol info */
548 MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt);
549 MIRType *stmtMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
550
551 /* in general, the RHS comes from a dread/iread */
552 CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem");
553 MemOperand &memRhs = static_cast<MemOperand &>(opndRhs);
554 ImmOperand *stOfstSrc = memRhs.GetOffsetOperand();
555 RegOperand *baseSrc = memRhs.GetBaseRegister();
556
557 if (stmtMirType->GetPrimType() == PTY_agg) {
558 /* generate move to regs for agg return */
559 RegOperand *result[kFourRegister] = {nullptr}; /* up to 2 int or 4 fp */
560 uint32 numRegs = (symbolInfo.size <= k8ByteSize) ? kOneRegister : kTwoRegister;
561 PrimType retPrimType = (symbolInfo.size <= k4ByteSize) ? PTY_u32 : PTY_u64;
562 for (uint32 i = 0; i < numRegs; i++) {
563 MemOperand &rhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(retPrimType));
564 rhsMemOpnd.SetBaseRegister(*baseSrc);
565 ImmOperand &newStOfstSrc = static_cast<ImmOperand &>(*stOfstSrc->Clone(*cgFunc->GetMemoryPool()));
566 newStOfstSrc.SetValue(newStOfstSrc.GetValue() + i * k8ByteSize);
567 rhsMemOpnd.SetOffsetOperand(newStOfstSrc);
568 regno_t regNo = (i == 0) ? x64::RAX : x64::RDX;
569 result[i] = &cgFunc->GetOpndBuilder()->CreatePReg(regNo, GetPrimTypeBitSize(retPrimType),
570 cgFunc->GetRegTyFromPrimTy(retPrimType));
571 SelectCopy(*(result[i]), rhsMemOpnd, retPrimType);
572 }
573 } else {
574 RegOperand *lhsAddrOpnd = &SelectCopy2Reg(AddrOpnd, stmt.Opnd(0)->GetPrimType());
575 MemOperand &symbolMem =
576 cgFunc->GetOpndBuilder()->CreateMem(*lhsAddrOpnd, symbolInfo.offset, GetPrimTypeBitSize(PTY_u64));
577 SelectAggCopy(symbolMem, memRhs, symbolInfo.size);
578 }
579 }
580
581 Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand &paramOpnds, ListOperand &retOpnds)
582 {
583 Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
584 callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds);
585 cgFunc->GetCurBB()->AppendInsn(callInsn);
586 cgFunc->GetCurBB()->SetHasCall();
587 cgFunc->GetFunction().SetHasCall();
588 return callInsn;
589 }
590
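/* Model the callee's return registers (rax, plus rdx for results wider than 64 bits) as results of the
 * call instruction so that later phases see their definitions. */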
591 void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds)
592 {
593 if (retType == nullptr) {
594 return;
595 }
596 auto retSize = retType->GetSize() * kBitsPerByte;
597 if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) {
598 if (retSize > k0BitSize) {
599 retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt));
600 }
601 if (retSize > k64BitSize) {
602 retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt));
603 }
604 }
605 }
606
607 void X64MPIsel::SelectCall(CallNode &callNode)
608 {
609 MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
610 MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false);
611 Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym);
612
613 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
614 uint32 fpNum = 0;
615 SelectParmList(callNode, paramOpnds, fpNum);
616 /* x64 ABI: for calls to variadic functions, rax (al) passes the number of vector registers used for arguments */
617 if (fn->IsVarargs()) {
618 ImmOperand &fpNumImm = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, fpNum);
619 RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt);
620 SelectCopy(raxOpnd, fpNumImm, PTY_i64);
621 }
622
623 MIRType *retType = fn->GetReturnType();
624 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
625 SelectCalleeReturn(retType, retOpnds);
626
627 Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
628 callInsn.SetRetType(Insn::kRegInt);
629 if (retType != nullptr) {
630 callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
631 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
632 }
633 const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
634 for (const auto &elem : deoptBundleInfo) {
635 auto valueKind = elem.second.GetMapleValueKind();
636 if (valueKind == MapleValue::kPregKind) {
637 auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx());
638 CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
639 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
640 } else if (valueKind == MapleValue::kConstKind) {
641 auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
642 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
643 } else {
644 CHECK_FATAL(false, "not supported currently");
645 }
646 }
647 cgFunc->AppendStackMapInsn(callInsn);
648 }
649
650 void X64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0)
651 {
652 RegOperand &targetOpnd = SelectCopy2Reg(opnd0, iCallNode.Opnd(0)->GetPrimType());
653 ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList();
654 uint32 fpNum = 0;
655 SelectParmList(iCallNode, paramOpnds, fpNum);
656
657 MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx());
658 if (iCallNode.GetOpCode() == OP_icallproto) {
659 CHECK_FATAL((retType->GetKind() == kTypeFunction), "NIY, must be func");
660 auto calleeType = static_cast<MIRFuncType *>(retType);
661 retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx());
662 }
663 ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
664 SelectCalleeReturn(retType, retOpnds);
665
666 Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds);
667 callInsn.SetRetType(Insn::kRegInt);
668 if (retType != nullptr) {
669 callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
670 callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
671 }
672 const auto &deoptBundleInfo = iCallNode.GetDeoptBundleInfo();
673 for (const auto &elem : deoptBundleInfo) {
674 auto valueKind = elem.second.GetMapleValueKind();
675 if (valueKind == MapleValue::kPregKind) {
676 auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx());
677 CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
678 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
679 } else if (valueKind == MapleValue::kConstKind) {
680 auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
681 callInsn.AddDeoptBundleInfo(elem.first, *opnd);
682 } else {
683 CHECK_FATAL(false, "not supported currently");
684 }
685 }
686 cgFunc->AppendStackMapInsn(callInsn);
687 }
688
689 Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg)
690 {
691 return GetTargetRetOperand(primType, sReg);
692 }
693
694 void X64MPIsel::SelectGoto(GotoNode &stmt)
695 {
696 MOperator mOp = x64::MOP_jmpq_l;
697 auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
698 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
699 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
700 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
701 jmpInsn.AddOpndChain(targetOpnd);
702 cgFunc->GetCurBB()->SetKind(BB::kBBGoto);
703 return;
704 }
705
706 void X64MPIsel::SelectIgoto(Operand &opnd0)
707 {
708 CHECK_FATAL(opnd0.IsRegister(), "only register implemented!");
709 MOperator mOp = x64::MOP_jmpq_r;
710 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
711 jmpInsn.AddOpndChain(opnd0);
712 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
713 return;
714 }
715
716 /* This function generates inline code that fills in the va_list data structure */
717 /* type $__va_list <struct {
718 @__stack <* void> align(8),
719 @__gr_top <* void> align(8),
720 @__vr_top <* void> align(8),
721 @__gr_offs i32 align(4),
722 @__vr_offs i32 align(4)}>
724 */
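/* The five fields are written at byte offsets 0 (__stack), 8 (__gr_top), 16 (__vr_top), 24 (__gr_offs)
 * and 28 (__vr_offs); the kNBitSize constants below are used as byte offsets. */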
725 void X64MPIsel::GenCVaStartIntrin(RegOperand &opnd, uint32 stkOffset)
726 {
727 /* FPLR only pushed in regalloc() after intrin function */
728 RegOperand &fpOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RFP, k64BitSize, kRegTyInt);
729
730 uint32 fpLrLength = k16BitSize;
731 /* __stack */
732 if (stkOffset != 0) {
733 stkOffset += fpLrLength;
734 }
735
736 /* isvary reset StackFrameSize */
737 ImmOperand &vaListOnPassArgStackOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
738 RegOperand &vReg = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
739 SelectAdd(vReg, fpOpnd, vaListOnPassArgStackOffset, GetLoweredPtrType());
740
741 // 8-byte fields of the va_list structure are stored with this mop.
742 MOperator mOp = x64::MOP_movq_r_m;
743
744 /* mem operand in va_list struct (lhs) */
745 MemOperand &vaList = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, 0, k64BitSize);
746 Insn &fillInStkOffsetInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
747 fillInStkOffsetInsn.AddOpndChain(vReg).AddOpndChain(vaList);
748 cgFunc->GetCurBB()->AppendInsn(fillInStkOffsetInsn);
749
750 /* __gr_top ; it's the same as __stack before the 1st va_arg */
751 stkOffset = 0;
752 ImmOperand &grTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
753 SelectSub(vReg, fpOpnd, grTopOffset, PTY_a64);
754
755 /* mem operand in va_list struct (lhs) */
756 MemOperand &vaListGRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k8BitSize, k64BitSize);
757 Insn &fillInGRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
758 fillInGRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListGRTop);
759 cgFunc->GetCurBB()->AppendInsn(fillInGRTopInsn);
760
761 /* __vr_top */
762 int32 grAreaSize = static_cast<int32>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfGRSaveArea());
763 stkOffset += grAreaSize;
764 stkOffset += k8BitSize;
765 ImmOperand &vaListVRTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
766 SelectSub(vReg, fpOpnd, vaListVRTopOffset, PTY_a64);
767
768 MemOperand &vaListVRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k16BitSize, k64BitSize);
769 Insn &fillInVRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
770 fillInVRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListVRTop);
771 cgFunc->GetCurBB()->AppendInsn(fillInVRTopInsn);
772
773 // 4-byte fields of the va_list structure are stored with this mop.
774 mOp = x64::MOP_movl_r_m;
775
776 /* __gr_offs */
777 int32 grOffs = 0 - grAreaSize;
778 ImmOperand &vaListGROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, grOffs);
779 RegOperand &grOffsRegOpnd = SelectCopy2Reg(vaListGROffsOffset, PTY_a32);
780
781 MemOperand &vaListGROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize, k64BitSize);
782 Insn &fillInGROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
783 fillInGROffsInsn.AddOpndChain(grOffsRegOpnd).AddOpndChain(vaListGROffs);
784 cgFunc->GetCurBB()->AppendInsn(fillInGROffsInsn);
785
786 /* __vr_offs */
787 int32 vrOffs = static_cast<int32>(
788 0UL - static_cast<int32>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfVRSaveArea()));
789 ImmOperand &vaListVROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, vrOffs);
790 RegOperand &vrOffsRegOpnd = SelectCopy2Reg(vaListVROffsOffset, PTY_a32);
791
792 MemOperand &vaListVROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize + 4, k64BitSize);
793 Insn &fillInVROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
794 fillInVROffsInsn.AddOpndChain(vrOffsRegOpnd).AddOpndChain(vaListVROffs);
795 cgFunc->GetCurBB()->AppendInsn(fillInVROffsInsn);
796 }
797
798 void X64MPIsel::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
799 {
800 DEBUG_ASSERT(intrnNode.NumOpnds() == kOpndNum2, "must be 2 operands");
801 MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
802 // compute the arithmetic result (add/sub/mul)
803 PrimType type = intrnNode.Opnd(0)->GetPrimType();
804 CHECK_FATAL(intrnNode.Opnd(0)->GetPrimType() == intrnNode.Opnd(1)->GetPrimType(), "should be same");
805 RegOperand &opnd0 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
806 intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
807 RegOperand &opnd1 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
808 intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */
809 RegOperand &resReg =
810 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), cgFunc->GetRegTyFromPrimTy(type));
811 if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
812 SelectAdd(resReg, opnd0, opnd1, type);
813 } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
814 SelectSub(resReg, opnd0, opnd1, type);
815 } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
816 SelectMpy(resReg, opnd0, opnd1, type);
817 } else {
818 CHECK_FATAL(false, "niy");
819 }
820
821 // store
822 auto *p2nrets = &intrnNode.GetReturnVec();
823 if (p2nrets->size() == k1ByteSize) {
824 StIdx stIdx = (*p2nrets)[0].first;
825 MIRSymbol *sym =
826 cgFunc->GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
827 MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*sym, 1);
828 MemOperand &memOperand2 = GetOrCreateMemOpndFromSymbol(*sym, 2);
829 SelectCopy(memOperand, resReg, type);
830 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_seto_m, X64CG::kMd[MOP_seto_m]);
831 insn.AddOpndChain(memOperand2);
832 cgFunc->GetCurBB()->AppendInsn(insn);
833 } else {
834 CHECK_FATAL(false, "should not happen");
835 }
836 return;
837 }
838
839 /* The second parameter of va_start does not need to be handled here;
840  * it is mainly used in proepilog */
841 void X64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode)
842 {
843 DEBUG_ASSERT(intrnNode.NumOpnds() == kOpndNum2, "must be 2 operands");
844 /* 2 operands, but only 1 needed. Don't need to emit code for second operand
845 *
846 * va_list is passed as the address of a struct; load that address
847 */
848 BaseNode *argExpr = intrnNode.Opnd(0);
849 Operand *opnd = HandleExpr(intrnNode, *argExpr);
850 RegOperand &opnd0 = SelectCopy2Reg(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */
851
852 /* Find beginning of unnamed arg on stack.
853 * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...)
854 * where struct S has size 32; the addresses of r and s are on the stack, but they are named.
855 */
856 X64CallConvImpl parmLocator(cgFunc->GetBecommon());
857 CCLocInfo pLoc;
858 uint32 stkSize = 0;
859 for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); i++) {
860 MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(cgFunc->GetFunction().GetNthParamTyIdx(i));
861 parmLocator.LocateNextParm(*ty, pLoc);
862 if (pLoc.reg0 == kRinvalid) { /* on stack */
863 stkSize = static_cast<uint32_t>(pLoc.memOffset + pLoc.memSize);
864 }
865 }
866
867 stkSize = static_cast<uint32>(RoundUp(stkSize, GetPointerSize()));
868
869 GenCVaStartIntrin(opnd0, stkSize);
870
871 return;
872 }
873
874 void X64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode)
875 {
876 MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();
877
878 if (intrinsic == INTRN_C_va_start) {
879 SelectCVaStart(intrinsiccallNode);
880 return;
881 }
882 if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) {
883 return;
884 }
885 // JS
886 if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW ||
887 intrinsic == INTRN_MUL_WITH_OVERFLOW) {
888 SelectOverFlowCall(intrinsiccallNode);
889 return;
890 }
891
892 CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic));
893 }
894
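/* Lower rangegoto (switch) by materializing a local jump table of label addresses and jumping indirectly
 * through table[index * 8]. */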
895 void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd)
896 {
897 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
898 std::vector<uint32> sizeArray;
899 const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
900 sizeArray.emplace_back(switchTable.size());
901 MemPool *memPool = cgFunc->GetMemoryPool();
902 MIRArrayType *arrayType = memPool->New<MIRArrayType>(etype->GetTypeIndex(), sizeArray);
903 MIRAggConst *arrayConst = memPool->New<MIRAggConst>(cgFunc->GetMirModule(), *arrayType);
904 for (const auto &itPair : switchTable) {
905 LabelIdx labelIdx = itPair.second;
906 cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx);
907 MIRConst *mirConst = memPool->New<MIRLblConst>(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype);
908 arrayConst->AddItem(mirConst, 0);
909 }
910 MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
911 lblSt->SetStorageClass(kScFstatic);
912 lblSt->SetSKind(kStConst);
913 lblSt->SetTyIdx(arrayType->GetTypeIndex());
914 lblSt->SetKonst(arrayConst);
915 std::string lblStr(".L_");
916 uint32 labelIdxTmp = cgFunc->GetLabelIdx();
917 lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++));
918 cgFunc->SetLabelIdx(labelIdxTmp);
919 lblSt->SetNameStrIdx(lblStr);
920 cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt);
921 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0);
922 /* get index */
923 PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType();
924 RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType);
925 int32 minIdx = switchTable[0].first;
926 ImmOperand &opnd1 =
927 cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), -minIdx - rangeGotoNode.GetTagOffset());
928 RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt);
929 SelectAdd(indexOpnd, opnd0, opnd1, srcType);
930
931 /* load the displacement into a register by accessing memory at base + index * 8 */
932 /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */
933 MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64));
934 RegOperand &baseReg = cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt);
935 dstMemOpnd.SetBaseRegister(baseReg);
936 dstMemOpnd.SetIndexRegister(indexOpnd);
937 dstMemOpnd.SetOffsetOperand(stOpnd);
938 dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize));
939
940 /* jumping to the absolute address which is stored in dstRegOpnd */
941 MOperator mOp = x64::MOP_jmpq_m;
942 Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
943 jmpInsn.AddOpndChain(dstMemOpnd);
944 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
945 }
946
947 Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent)
948 {
949 /* get mirSymbol info*/
950 MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());
951 /* <prim-type> of AddrofNode must be either ptr, a32 or a64 */
952 PrimType ptype = expr.GetPrimType();
953 RegOperand &resReg =
954 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(ptype), cgFunc->GetRegTyFromPrimTy(ptype));
955 MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID());
956 uint32 pSize = GetPrimTypeSize(ptype);
957 MOperator mOp;
958 if (pSize <= k4ByteSize) {
959 mOp = x64::MOP_leal_m_r;
960 } else if (pSize <= k8ByteSize) {
961 mOp = x64::MOP_leaq_m_r;
962 } else {
963 CHECK_FATAL(false, "NIY");
964 }
965 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
966 addrInsn.AddOpndChain(memOperand).AddOpndChain(resReg);
967 cgFunc->GetCurBB()->AppendInsn(addrInsn);
968 return &resReg;
969 }
970
971 Operand *X64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent)
972 {
973 uint32 instrSize = static_cast<uint32>(expr.SizeOfInstr());
974 /* <prim-type> must be either a32 or a64. */
975 PrimType primType = (instrSize == k8ByteSize) ? PTY_a64 : (instrSize == k4ByteSize) ? PTY_a32 : PTY_begin;
976 CHECK_FATAL(primType != PTY_begin, "prim-type of Func Addr must be either a32 or a64!");
977 MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx());
978 MIRSymbol *symbol = mirFunction->GetFuncSymbol();
979 MIRStorageClass storageClass = symbol->GetStorageClass();
980 RegOperand &resReg =
981 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType));
982 if (storageClass == maple::kScText && symbol->GetSKind() == maple::kStFunc) {
983 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*symbol, 0, 0);
984 X64MOP_t mOp = x64::MOP_movabs_s_r;
985 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
986 addrInsn.AddOpndChain(stOpnd).AddOpndChain(resReg);
987 cgFunc->GetCurBB()->AppendInsn(addrInsn);
988 } else {
989 CHECK_FATAL(false, "NIY");
990 }
991 return &resReg;
992 }
993
994 Operand *X64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent)
995 {
996 PrimType primType = expr.GetPrimType();
997 uint32 bitSize = GetPrimTypeBitSize(primType);
998 RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
999 RegOperand &baseOpnd =
1000 cgFunc->GetOpndBuilder()->CreatePReg(x64::RIP, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1001
1002 auto labelStr = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(expr.GetOffset());
1003 MIRSymbol *labelSym = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
1004 DEBUG_ASSERT(labelSym != nullptr, "null ptr check");
1005 labelSym->SetStorageClass(kScFstatic);
1006 labelSym->SetSKind(kStConst);
1007 labelSym->SetNameStrIdx(labelStr);
1008 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
1009 DEBUG_ASSERT(etype != nullptr, "null ptr check");
1010 auto *labelConst =
1011 cgFunc->GetMemoryPool()->New<MIRLblConst>(expr.GetOffset(), cgFunc->GetFunction().GetPuidx(), *etype);
1012 DEBUG_ASSERT(labelConst != nullptr, "null ptr check");
1013 labelSym->SetKonst(labelConst);
1014 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0);
1015
1016 MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(bitSize);
1017 memOpnd.SetBaseRegister(baseOpnd);
1018 memOpnd.SetOffsetOperand(stOpnd);
1019
1020 X64MOP_t mOp = x64::MOP_begin;
1021 if (bitSize <= k32BitSize) {
1022 mOp = x64::MOP_leal_m_r;
1023 } else if (bitSize <= k64BitSize) {
1024 mOp = x64::MOP_leaq_m_r;
1025 } else {
1026 CHECK_FATAL(false, "NIY");
1027 }
1028 Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]));
1029 addrInsn.AddOpndChain(memOpnd).AddOpndChain(resOpnd);
1030 cgFunc->GetCurBB()->AppendInsn(addrInsn);
1031 return &resOpnd;
1032 }
1033
1034 /*
1035  * unordered: ZF, PF, CF ==> 1, 1, 1
1036  * above:     ZF, PF, CF ==> 0, 0, 0
1037  * below:     ZF, PF, CF ==> 0, 0, 1
1038  * equal:     ZF, PF, CF ==> 1, 0, 0
1039  *
1040  * "less than" (which only checks CF == 1) cannot be distinguished from "unordered" (which also sets CF),
1041  * so for floats lt/le are expressed as gt/ge with the operands swapped.
1042  */
1043 static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned)
1044 {
1045 switch (cmpOp) {
1046 case OP_ne:
1047 return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l;
1048 case OP_eq:
1049 return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l;
1050 case OP_lt:
1051 return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jl_l : MOP_jb_l))
1052 : (isSigned ? MOP_jge_l : MOP_jae_l);
1053 case OP_le:
1054 return (brOp == OP_brtrue) ? (isFloat ? MOP_jae_l : (isSigned ? MOP_jle_l : MOP_jbe_l))
1055 : (isSigned ? MOP_jg_l : MOP_ja_l);
1056 case OP_gt:
1057 return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jg_l : MOP_ja_l))
1058 : (isSigned ? MOP_jle_l : MOP_jbe_l);
1059 case OP_ge:
1060 return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) : (isSigned ? MOP_jl_l : MOP_jb_l);
1061 default:
1062 CHECK_FATAL(false, "PickJmpInsn error");
1063 }
1064 }
1065
1066 /*
1067 * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node
1068 * such as a dread for example
1069 */
1070 void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0)
1071 {
1072 Opcode opcode = stmt.GetOpCode();
1073 X64MOP_t jmpOperator = x64::MOP_begin;
1074 if (opnd0.IsImmediate()) {
1075 DEBUG_ASSERT(opnd0.IsIntImmediate(), "only support int immediate");
1076 DEBUG_ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode");
1077 ImmOperand &immOpnd0 = static_cast<ImmOperand &>(opnd0);
1078 if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) ||
1079 (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) {
1080 return;
1081 }
1082 jmpOperator = x64::MOP_jmpq_l;
1083 cgFunc->SetCurBBKind(BB::kBBGoto);
1084 } else {
1085 PrimType primType;
1086 Opcode condOpcode = condNode.GetOpCode();
1087 // a non-compare condition is lowered to (cond != 0)
1088 if (!kOpcodeInfo.IsCompare(condOpcode)) {
1089 primType = condNode.GetPrimType();
1090 ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0);
1091 SelectCmp(opnd0, imm0, primType);
1092 condOpcode = OP_ne;
1093 } else {
1094 primType = static_cast<CompareNode &>(condNode).GetOpndType();
1095 }
1096 bool isFloat = IsPrimitiveFloat(primType);
1097 jmpOperator = PickJmpInsn(opcode, condOpcode, isFloat, IsSignedInteger(primType));
1098 cgFunc->SetCurBBKind(BB::kBBIf);
1099 }
1100 /* gen targetOpnd, .L.xxx__xx */
1101 auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
1102 LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
1103 /* select jump Insn */
1104 Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator]));
1105 jmpInsn.AddOpndChain(targetOpnd);
1106 cgFunc->GetCurBB()->AppendInsn(jmpInsn);
1107 }
1108
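/* Materialize the address of a string literal: create (or reuse) an fstatic symbol for the literal and load
 * its address with movabs. */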
1109 Operand *X64MPIsel::SelectStrLiteral(ConststrNode &constStr)
1110 {
1111 std::string labelStr;
1112 labelStr.append(".LUstr_");
1113 labelStr.append(std::to_string(constStr.GetStrIdx()));
1114 MIRSymbol *labelSym =
1115 GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(labelStr));
1116 MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
1117 auto *c = cgFunc->GetMemoryPool()->New<MIRStrConst>(constStr.GetStrIdx(), *etype);
1118 if (labelSym == nullptr) {
1119 labelSym = cgFunc->GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c->GetType());
1120 labelSym->SetStorageClass(kScFstatic);
1121 labelSym->SetSKind(kStConst);
1122 /* c may be local, we need a global node here */
1123 labelSym->SetKonst(cgFunc->NewMirConst(*c));
1124 }
1125 if (c->GetPrimType() == PTY_ptr) {
1126 ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0);
1127 RegOperand &addrOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, cgFunc->GetRegTyFromPrimTy(PTY_a64));
1128 Insn &addrOfInsn = (cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_s_r, X64CG::kMd[x64::MOP_movabs_s_r]));
1129 addrOfInsn.AddOpndChain(stOpnd).AddOpndChain(addrOpnd);
1130 cgFunc->GetCurBB()->AppendInsn(addrOfInsn);
1131 return &addrOpnd;
1132 }
1133 CHECK_FATAL(false, "Unsupported const string type");
1134 return nullptr;
1135 }
1136
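/* Return the physical register holding a function result: rax (or the first fp register for floats) for
 * retval0, rdx for retval1. */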
1137 Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg)
1138 {
1139 uint32 bitSize = GetPrimTypeBitSize(primType);
1140 regno_t retReg = 0;
1141 switch (sReg) {
1142 case kSregRetval0:
1143 retReg = IsPrimitiveFloat(primType) ? x64::V0 : x64::RAX;
1144 break;
1145 case kSregRetval1:
1146 retReg = x64::RDX;
1147 break;
1148 default:
1149 CHECK_FATAL(false, "GetTargetRetOperand: NIY");
1150 break;
1151 }
1152 RegOperand &parmRegOpnd =
1153 cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1154 return parmRegOpnd;
1155 }
1156
1157 Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1158 {
1159 PrimType dtype = node.GetPrimType();
1160 RegOperand *resOpnd = nullptr;
1161 if (!IsPrimitiveVector(dtype)) {
1162 resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
1163 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
1164 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType());
1165 SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype);
1166 } else {
1167 /* vector operand */
1168 CHECK_FATAL(false, "NIY");
1169 }
1170
1171 return resOpnd;
1172 }
1173
1174 void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1175 {
1176 uint32 bitSize = GetPrimTypeBitSize(primType);
1177 SelectCopy(resOpnd, opnd0, primType);
1178 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType);
1179 if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
1180 X64MOP_t mOp = (bitSize == k64BitSize)
1181 ? x64::MOP_imulq_r_r
1182 : (bitSize == k32BitSize) ? x64::MOP_imull_r_r
1183 : (bitSize == k16BitSize) ? x64::MOP_imulw_r_r : x64::MOP_begin;
1184 CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
1185 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1186 insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
1187 cgFunc->GetCurBB()->AppendInsn(insn);
1188 } else if (IsPrimitiveFloat(primType)) {
1189 X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_mulfd_r_r :
1190 (bitSize == k32BitSize) ? x64::MOP_mulfs_r_r : x64::MOP_begin;
1191 CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
1192 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1193 insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
1194 cgFunc->GetCurBB()->AppendInsn(insn);
1195 }
1196 }
1197
1198 /*
1199 * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX), Remainder(EDX)
1200 * The IDIV instruction performs signed division of EDX:EAX by the contents of a 32-bit register or memory
1201 * location and stores the quotient in EAX and the remainder in EDX.
1202 * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the
1203 * sign of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor.
1204 * An overflow generates a #DE (divide error) exception, rather than setting the OF flag.
1205 * To avoid overflow problems, precede IDIV with a CDQ instruction, which sign-extends the dividend in EAX
1206 * into EDX:EAX before the division is performed.
1207 */
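/*
 * Illustrative lowering sketch (the register choices below are placeholders; actual operands come from
 * register allocation):
 *   signed 32-bit a / b:    movl a, %eax ; cdq ; idivl b          => quotient in EAX, remainder in EDX
 *   unsigned 32-bit a / b:  movl a, %eax ; movl $0, %edx ; divl b
 */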
1208 Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1209 {
1210 PrimType primType = node.GetPrimType();
1211 Operand *resOpnd = nullptr;
1212 if (!IsPrimitiveVector(primType)) {
1213 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
1214 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
1215 resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
1216 } else {
1217 /* vector operand */
1218 CHECK_FATAL(false, "NIY");
1219 }
1220 return resOpnd;
1221 }
1222
1223 Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1224 {
1225 PrimType primType = node.GetPrimType();
1226 Operand *resOpnd = nullptr;
1227 if (!IsPrimitiveVector(primType)) {
1228 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
1229 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
1230 resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
1231 } else {
1232 /* vector operand */
1233 CHECK_FATAL(false, "NIY");
1234 }
1235 return resOpnd;
1236 }
1237
1238 Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode)
1239 {
1240 DEBUG_ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode");
1241 if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
1242 uint32 bitSize = GetPrimTypeBitSize(primType);
1243 /* copy dividend to eax */
1244 RegOperand &raxOpnd =
1245 cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1246 SelectCopy(raxOpnd, opnd0, primType);
1247
1248 RegOperand &rdxOpnd =
1249 cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1250 bool isSigned = IsSignedInteger(primType);
1251 if (isSigned) {
1252             /* cdq: edx:eax = sign-extend of eax */
1253 X64MOP_t cvtMOp =
1254 (bitSize == k64BitSize)
1255 ? x64::MOP_cqo
1256 : (bitSize == k32BitSize) ? x64::MOP_cdq : (bitSize == k16BitSize) ? x64::MOP_cwd : x64::MOP_begin;
1257 CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping");
1258 Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd);
1259 cgFunc->GetCurBB()->AppendInsn(cvtInsn);
1260 } else {
1261 /* set edx = 0 */
1262 SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType);
1263 }
1264 /* div */
1265 X64MOP_t divMOp =
1266 (bitSize == k64BitSize)
1267 ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r)
1268 : (bitSize == k32BitSize)
1269 ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r)
1270 : (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r) : x64::MOP_begin;
1271 CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping");
1272 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd);
1273 cgFunc->GetCurBB()->AppendInsn(insn);
1274 /* return */
1275 RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1276 SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType);
1277 return &resOpnd;
1278 } else if (IsPrimitiveFloat(primType)) {
1279 uint32 bitSize = GetPrimTypeBitSize(primType);
1280 auto &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
1281 SelectCopy(resOpnd, opnd0, primType);
1282 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_divsd_r, opnd1, resOpnd);
1283 cgFunc->GetCurBB()->AppendInsn(insn);
1284 return &resOpnd;
1285 } else {
1286 CHECK_FATAL(false, "NIY");
1287 }
1288 }
1289
1290 Operand *X64MPIsel::SelectLnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent)
1291 {
1292 PrimType dtype = node.GetPrimType();
1293 RegOperand *resOpnd = nullptr;
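    /* lnot x is lowered as (x == 0): integers use cmp against 0 plus sete, floats compare equal against zero */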
1294 if (!IsPrimitiveVector(dtype)) {
1295 resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
1296 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
1297 ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(dtype), 0);
1298 if (IsPrimitiveFloat(dtype)) {
1299 SelectCmpFloatEq(*resOpnd, regOpnd0, immOpnd, dtype, dtype);
1300 } else {
1301 SelectCmp(regOpnd0, immOpnd, dtype);
1302 SelectCmpResult(*resOpnd, OP_eq, dtype, dtype);
1303 }
1304 } else {
1305 /* vector operand */
1306 CHECK_FATAL(false, "NIY");
1307 }
1308 return resOpnd;
1309 }
1310
1311 /*
1312 * unordered: ZF, PF, CF ==> 1, 1, 1
1313 * above:     ZF, PF, CF ==> 0, 0, 0
1314 * below:     ZF, PF, CF ==> 0, 0, 1
1315 * equal:     ZF, PF, CF ==> 1, 0, 0
1316 *
1317 * A less-than check only tests CF = 1, so it cannot be distinguished from the unordered case, which also
1318 * sets CF = 1. Therefore ** float lt/le is evaluated as gt/ge with the operands swapped **.
1319 *
1320 * float eq uses cmpeqsd, the same as llvm
1321 */
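/*
 * Illustrative sketch of the emitted compares (AT&T operand order; register names are placeholders):
 *   a > b  : ucomisd b, a ; seta res     (CF = 0 and ZF = 0, which also excludes the unordered case)
 *   a < b  : ucomisd a, b ; seta res     (operands swapped, evaluated as b > a)
 *   a == b : cmpeqsd, producing an all-ones / all-zeros mask that is then moved to a GPR
 */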
1322 Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
1323 {
1324 PrimType dtype = node.GetPrimType();
1325 PrimType primOpndType = node.GetOpndType();
1326 RegOperand *resOpnd = nullptr;
1327 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType());
1328 RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType());
1329 if (!IsPrimitiveVector(node.GetPrimType())) {
1330 resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
1331 auto nodeOp = node.GetOpCode();
1332 Opcode parentOp = parent.GetOpCode();
1333 bool isFloat = IsPrimitiveFloat(primOpndType);
1334 bool isJump = (parentOp == OP_brfalse || parentOp == OP_brtrue || parentOp == OP_select);
1335 // float eq
1336 if (isFloat && (nodeOp == maple::OP_eq) && (!isJump)) {
1337 SelectCmpFloatEq(*resOpnd, regOpnd0, regOpnd1, dtype, primOpndType);
1338 return resOpnd;
1339 }
1340
1341 bool isSwap = (isFloat && (nodeOp == maple::OP_le || nodeOp == maple::OP_lt) && (parentOp != OP_brfalse));
1342 SelectCmp(regOpnd0, regOpnd1, primOpndType, isSwap);
1343 if (isJump) {
1344 return resOpnd;
1345 }
1346 SelectCmpResult(*resOpnd, nodeOp, dtype, primOpndType);
1347 } else {
1348 /* vector operand */
1349 CHECK_FATAL(false, "NIY");
1350 }
1351 return resOpnd;
1352 }
1353
1354 void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType, bool isSwap)
1355 {
1356 x64::X64MOP_t cmpMOp = x64::MOP_begin;
1357 if (IsPrimitiveInteger(primType)) {
1358 cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType);
1359 } else if (IsPrimitiveFloat(primType)) {
1360 cmpMOp = x64::MOP_ucomisd_r_r;
1361 } else {
1362 CHECK_FATAL(false, "NIY");
1363 }
1364 DEBUG_ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp");
1365 Insn &cmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp]));
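    /* the source operand is added first: the default order (opnd1, opnd0) compares opnd0 against opnd1;
     * isSwap reverses the order so that float lt/le can be evaluated as gt/ge */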
1366 if (isSwap) {
1367 cmpInsn.AddOpndChain(opnd0).AddOpndChain(opnd1);
1368 } else {
1369 cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0);
1370 }
1371 cgFunc->GetCurBB()->AppendInsn(cmpInsn);
1372 }
1373
1374 void X64MPIsel::SelectCmpFloatEq(RegOperand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primResType,
1375 PrimType primOpndType)
1376 {
1377     /* float eq uses cmpeqsd, the same as llvm */
1378 x64::X64MOP_t eqMOp = x64::MOP_cmpeqsd_r_r;
1379 Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(eqMOp, X64CG::kMd[eqMOp]);
1380
1381 auto ®Opnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primOpndType),
1382 cgFunc->GetRegTyFromPrimTy(primOpndType));
1383 SelectCopy(regOpnd1, opnd1, primOpndType);
1384 /* CMPEQSD xmm1, xmm2 => CMPSD xmm1, xmm2, 0 */
1385 setInsn.AddOpndChain(opnd0).AddOpndChain(regOpnd1);
1386 cgFunc->GetCurBB()->AppendInsn(setInsn);
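    /* cmpeqsd leaves an all-ones (equal) or all-zeros (not equal) mask in regOpnd1 */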
1387
1388 /* set result -> u64/u32 */
1389     auto tmpResType = (primOpndType == maple::PTY_f64) ? PTY_u64 : PTY_u32;
1390 RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(tmpResType),
1391 cgFunc->GetRegTyFromPrimTy(tmpResType));
1392 SelectRetypeFloat(tmpResOpnd, regOpnd1, tmpResType, primOpndType);
1393 /* cvt u64/u32 -> primType */
1394 SelectIntCvt(resOpnd, tmpResOpnd, primResType, tmpResType);
1395 }
1396
1397 void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType)
1398 {
1399 bool isFloat = IsPrimitiveFloat(primOpndType);
1400 bool isSigned = (!IsPrimitiveUnsigned(primOpndType) && !IsPrimitiveFloat(primOpndType));
1401 /* set result -> u8 */
1402 RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k8BitSize, cgFunc->GetRegTyFromPrimTy(PTY_u8));
1403 x64::X64MOP_t setMOp = GetSetCCMop(opCode, tmpResOpnd.GetKind(), isSigned, isFloat);
1404 DEBUG_ASSERT(setMOp != x64::MOP_begin, "unsupported mOp");
1405 Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(setMOp, X64CG::kMd[setMOp]);
1406 setInsn.AddOpndChain(tmpResOpnd);
1407 cgFunc->GetCurBB()->AppendInsn(setInsn);
1408 /* cvt u8 -> primType */
1409 SelectIntCvt(resOpnd, tmpResOpnd, primType, PTY_u8);
1410 }
1411
1412 Operand *X64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd,
1413 const BaseNode &parent)
1414 {
1415 PrimType dtype = expr.GetPrimType();
1416 RegOperand &resOpnd =
1417 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
1418 RegOperand &trueRegOpnd = SelectCopy2Reg(trueOpnd, dtype, expr.Opnd(1)->GetPrimType());
1419 RegOperand &falseRegOpnd = SelectCopy2Reg(falseOpnd, dtype, expr.Opnd(2)->GetPrimType());
1420 Opcode cmpOpcode;
1421 PrimType cmpPrimType;
1422 if (kOpcodeInfo.IsCompare(expr.Opnd(0)->GetOpCode())) {
1423 CompareNode *cmpNode = static_cast<CompareNode *>(expr.Opnd(0));
1424 DEBUG_ASSERT(cmpNode != nullptr, "null ptr check");
1425 cmpOpcode = cmpNode->GetOpCode();
1426 cmpPrimType = cmpNode->GetOpndType();
1427 } else {
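        /* the condition is a plain value rather than a compare: select on (cond != 0) */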
1428 cmpPrimType = expr.Opnd(0)->GetPrimType();
1429 cmpOpcode = OP_ne;
1430 ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(cmpPrimType), 0);
1431 SelectCmp(cond, immOpnd, cmpPrimType);
1432 }
1433 SelectSelect(resOpnd, trueRegOpnd, falseRegOpnd, dtype, cmpOpcode, cmpPrimType);
1434 return &resOpnd;
1435 }
1436
1437 void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
1438 Opcode cmpOpcode, PrimType cmpPrimType)
1439 {
1440 CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY");
1441 bool isSigned = !IsPrimitiveUnsigned(primType);
1442 uint32 bitSize = GetPrimTypeBitSize(primType);
1443 if (bitSize == k8BitSize) {
1444         /* cmov does not support 8-bit operands, so widen to 32-bit first */
1445 PrimType cvtType = isSigned ? PTY_i32 : PTY_u32;
1446 RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt);
1447 Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType);
1448 Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType);
1449 SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType);
1450 SelectCopy(resOpnd, tmpResOpnd, primType, cvtType);
1451 return;
1452 }
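    /* resOpnd is preloaded with the false value; the cmovCC below overwrites it with the true value
     * when the condition established by the preceding cmp holds */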
1453 RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType);
1454 SelectCopy(resOpnd, falseOpnd, primType);
1455 x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType));
1456 DEBUG_ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp");
1457 Insn &comvInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]);
1458 comvInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd);
1459 cgFunc->GetCurBB()->AppendInsn(comvInsn);
1460 }
1461
1462 void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
1463 {
1464 if (IsPrimitiveInteger(primType)) {
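        /* min(a, b) = (a < b) ? a : b and max(a, b) = (a > b) ? a : b, realized as cmp + cmov via SelectSelect */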
1465 SelectCmp(opnd0, opnd1, primType);
1466 Opcode cmpOpcode = isMin ? OP_lt : OP_gt;
1467 SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType);
1468 } else {
1469         // float lt/le needs swapped operands and seta; not supported yet
1470 CHECK_FATAL(false, "NIY type max or min");
1471 }
1472 }
1473
1474 Operand *X64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
1475 {
1476 PrimType primType = node.GetPrimType();
1477 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType);
1478 Operand &retReg =
1479 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType));
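    /* there is no single x64 instruction for exp, so it is lowered to a call to the libm "exp" routine */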
1480 std::vector<Operand*> opndVec = {®Opnd0};
1481 SelectLibCall("exp", opndVec, primType, &retReg, primType);
1482 return &retReg;
1483 }
1484
1485 Operand *X64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
1486 {
1487 CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
1488 PrimType origPrimType = node.Opnd(0)->GetPrimType();
1489 RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
1490
1491 bool is64BitCtz = node.GetIntrinsic() == INTRN_C_ctz64;
1492 MOperator mopBsf = is64BitCtz ? x64::MOP_bsfq_r_r : x64::MOP_bsfl_r_r;
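    /* bsf returns the index of the lowest set bit; as with __builtin_ctz, the result is undefined for input 0 */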
1493 Insn &bsfInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsf, X64CG::kMd[mopBsf]);
1494 bsfInsn.AddOpndChain(opnd).AddOpndChain(opnd);
1495 cgFunc->GetCurBB()->AppendInsn(bsfInsn);
1496
1497 PrimType retType = node.GetPrimType();
1498 RegOperand &destReg =
1499 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
1500 // ctz i32 (u32) => cvt u32 -> i32
1501 // ctz i32 (u64) => cvt u64 -> i32
1502 SelectIntCvt(destReg, opnd, retType, origPrimType);
1503 return &destReg;
1504 }
1505
1506 Operand *X64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
1507 {
1508 CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
1509 PrimType origPrimType = node.Opnd(0)->GetPrimType();
1510 RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
1511
1512 bool is64BitClz = node.GetIntrinsic() == INTRN_C_clz64;
1513 MOperator mopBsr = is64BitClz ? x64::MOP_bsrq_r_r : x64::MOP_bsrl_r_r;
1514 Insn &bsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsr, X64CG::kMd[mopBsr]);
1515 bsrInsn.AddOpndChain(opnd).AddOpndChain(opnd);
1516 cgFunc->GetCurBB()->AppendInsn(bsrInsn);
1517
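    /* clz(x) = (bitWidth - 1) - bsr(x); because bitWidth - 1 is 31 or 63 (all ones in the index range),
     * the subtraction can be done with an xor against bitWidth - 1 */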
1518 MOperator mopXor = is64BitClz ? x64::MOP_xorq_i_r : MOP_xorl_i_r;
1519 ImmOperand &imm =
1520 cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType), GetPrimTypeBitSize(origPrimType) - 1);
1521 Insn &xorInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopXor, X64CG::kMd[mopXor]);
1522 xorInsn.AddOpndChain(imm).AddOpndChain(opnd);
1523 cgFunc->GetCurBB()->AppendInsn(xorInsn);
1524
1525 PrimType retType = node.GetPrimType();
1526 RegOperand &destReg =
1527 cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
1528 SelectIntCvt(destReg, opnd, retType, origPrimType);
1529 return &destReg;
1530 }
1531
1532 Operand *X64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
1533 {
1534 PrimType dtype = node.GetPrimType();
1535 auto bitWidth = GetPrimTypeBitSize(dtype);
1536     // bswap only supports 32/64-bit operands; 16-bit is handled with xchg, e.g. xchg al, ah
1537 CHECK_FATAL(bitWidth == k16BitSize || bitWidth == k32BitSize || bitWidth == k64BitSize,
1538 "NIY, unsupported bitWidth.");
1539
1540 RegOperand *resOpnd = nullptr;
1541
1542 if (bitWidth == k16BitSize) {
1543 /*
1544          * For 16-bit, use xchg, e.g. xchg ah, al, so the register must have an addressable high 8-bit half.
1545          * On x64 these are RAX(AH:AL), RBX(BH:BL), RCX(CH:CL) and RDX(DH:DL).
1546          * The register allocator does not treat the high-8-bit case specially,
1547          * so RAX is used here directly.
1548 */
1549 resOpnd = &cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
1550 SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
1551 RegOperand &lowerOpnd =
1552 cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
1553 RegOperand &highOpnd =
1554 cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
1555 highOpnd.SetHigh8Bit();
1556 x64::X64MOP_t xchgMop = MOP_xchgb_r_r;
1557 Insn &xchgInsn = cgFunc->GetInsnBuilder()->BuildInsn(xchgMop, X64CG::kMd[xchgMop]);
1558 xchgInsn.AddOpndChain(highOpnd).AddOpndChain(lowerOpnd);
1559 cgFunc->GetCurBB()->AppendInsn(xchgInsn);
1560 } else {
1561 resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
1562 SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
1563 x64::X64MOP_t bswapMop = (bitWidth == k64BitSize) ? MOP_bswapq_r : MOP_bswapl_r;
1564 Insn &bswapInsn = cgFunc->GetInsnBuilder()->BuildInsn(bswapMop, X64CG::kMd[bswapMop]);
1565 bswapInsn.AddOperand(*resOpnd);
1566 cgFunc->GetCurBB()->AppendInsn(bswapInsn);
1567 }
1568 return resOpnd;
1569 }
1570
1571 RegOperand &X64MPIsel::GetTargetStackPointer(PrimType primType)
1572 {
1573 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, GetPrimTypeBitSize(primType),
1574 cgFunc->GetRegTyFromPrimTy(primType));
1575 }
1576
1577 RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType)
1578 {
1579 return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType),
1580 cgFunc->GetRegTyFromPrimTy(primType));
1581 }
1582
1583 void X64MPIsel::SelectRetypeFloat(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType)
1584 {
1585 uint32 fromSize = GetPrimTypeBitSize(fromType);
1586 [[maybe_unused]] uint32 toSize = GetPrimTypeBitSize(toType);
1587     DEBUG_ASSERT(fromSize == toSize, "retype bit width doesn't match");
1588 RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType);
1589 MOperator mOp = x64::MOP_begin;
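    /* movd/movq copy the raw bits between a GPR and an XMM register; the 32-bit GPR -> XMM direction is not
     * selected yet and falls through to the CHECK_FATAL below */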
1590 if (fromSize == k32BitSize) {
1591 mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movd_fr_r : x64::MOP_begin;
1592 } else if (fromSize == k64BitSize) {
1593 mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movq_fr_r : x64::MOP_movq_r_fr;
1594 } else {
1595 CHECK_FATAL(false, "niy");
1596 }
1597 CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
1598 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1599 (void)insn.AddOpndChain(regOpnd0).AddOpndChain(resOpnd);
1600 cgFunc->GetCurBB()->AppendInsn(insn);
1601 return;
1602 }
1603
1604 Operand *X64MPIsel::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
1605 {
1606 PrimType dtype = node.GetPrimType();
1607 if (!IsPrimitiveFloat(dtype)) {
1608 DEBUG_ASSERT(false, "should be float type");
1609 return nullptr;
1610 }
1611 auto bitSize = GetPrimTypeBitSize(dtype);
1612 MOperator mOp = x64::MOP_begin;
1613 if (bitSize == k64BitSize) {
1614 mOp = MOP_sqrtd_r_r;
1615 } else if (bitSize == k32BitSize) {
1616 mOp = MOP_sqrts_r_r;
1617 } else {
1618 CHECK_FATAL(false, "niy");
1619 }
1620 RegOperand ®Opnd0 = SelectCopy2Reg(src, dtype);
1621 Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
1622 Operand &retReg = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(dtype));
1623
1624 (void)insn.AddOpndChain(regOpnd0).AddOpndChain(retReg);
1625 cgFunc->GetCurBB()->AppendInsn(insn);
1626 return &retReg;
1627 }
1628 void X64MPIsel::SelectAsm(AsmNode &node)
1629 {
1630 cgFunc->SetHasAsm();
1631 CHECK_FATAL(false, "NIY");
1632 }
1633 } // namespace maplebe
1634