/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "x64_MPISel.h"
#include "x64_isa_tbl.h"
#include "x64_cg.h"

namespace maplebe {
/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */
MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const
{
    PrimType symType;
    int32 fieldOffset = 0;
    CHECK_FATAL(fieldId == 0, "fieldId must be 0");
    symType = symbol.GetType()->GetPrimType();
    uint32 opndSz = GetPrimTypeBitSize(symType);
    return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset);
}
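/*
 * Address computation for a symbol: locals and formals (kScAuto/kScFormal) are addressed
 * relative to the frame base register assigned by the memory layout, while globals and
 * statics (kScGlobal/kScExtern/kScPstatic/kScFstatic) are addressed RIP-relative through
 * a symbol-plus-offset immediate.
 */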
MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const
{
    MIRStorageClass storageClass = symbol.GetStorageClass();
    MemOperand *result = nullptr;
    RegOperand *stackBaseReg = nullptr;
    if ((storageClass == kScAuto) || (storageClass == kScFormal)) {
        auto *symloc = static_cast<X64SymbolAlloc *>(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex()));
        DEBUG_ASSERT(symloc != nullptr, "sym loc should have been defined");
        stackBaseReg = static_cast<X64CGFunc *>(cgFunc)->GetBaseReg(*symloc);
        int stOfst = cgFunc->GetBaseOffset(*symloc);
        /* Create field symbols in aggregate structure */
        result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
        result->SetBaseRegister(*stackBaseReg);
        result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm(k64BitSize, stOfst + offset));
        CHECK_FATAL(result != nullptr, "NIY");
        return *result;
    }
    if ((storageClass == kScGlobal) || (storageClass == kScExtern) || (storageClass == kScPstatic) ||
        (storageClass == kScFstatic)) {
        stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt);
        result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize);
        ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0);
        result->SetBaseRegister(*stackBaseReg);
        result->SetOffsetOperand(stOfstOpnd);
        CHECK_FATAL(result != nullptr, "NIY");
        return *result;
    }
    CHECK_FATAL(false, "NIY");
    return *result;
}

void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd)
{
    MIRType *retType = cgFunc->GetFunction().GetReturnType();
    X64CallConvImpl retLocator(cgFunc->GetBecommon());
    CCLocInfo retMech;
    retLocator.LocateRetVal(*retType, retMech);
    if (retMech.GetRegCount() == 0) {
        return;
    }
    std::vector<RegOperand *> retRegs;
    PrimType oriPrimType = retMech.GetPrimTypeOfReg0();
    regno_t retReg = retMech.GetReg0();
    DEBUG_ASSERT(retReg != kRinvalid, "NIY");
    RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, GetPrimTypeBitSize(oriPrimType),
                                                               cgFunc->GetRegTyFromPrimTy(oriPrimType));
    retRegs.push_back(&retOpnd);
    SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType());
    /* For optimization, insert a pseudo ret in case rax/rdx is removed */
    SelectPseduoForReturn(retRegs);
}

void X64MPIsel::SelectPseduoForReturn(std::vector<RegOperand *> &retRegs)
{
    for (auto retReg : retRegs) {
        MOperator mop = x64::MOP_pseudo_ret_int;
        Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]);
        cgFunc->GetCurBB()->AppendInsn(pInsn);
        pInsn.AddOpndChain(*retReg);
    }
}

void X64MPIsel::SelectReturn()
{
    /* jump to epilogue */
    MOperator mOp = x64::MOP_jmpq_l;
    LabelNode *endLabel = cgFunc->GetEndLabel();
    auto endLabelName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx());
    LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx());
    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    jmpInsn.AddOpndChain(targetOpnd);
    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
    cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB());
}

/*
 * SelectParmList generates an instruction for each parameter
 * to load the parameter value into the corresponding register.
 * We return a list of registers to the call instruction because
 * they may be needed in the register allocation phase.
 * fpNum is an output parameter holding the number of vector
 * registers used.
 */
void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum)
{
    paramPassByReg.clear();
    fpNum = 0;
    /* for IcallNode, the 0th operand is the function pointer */
    size_t argBegin = 0;
    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
        ++argBegin;
    }

    MIRFunction *callee = nullptr;
    if (naryNode.GetOpCode() == OP_call) {
        PUIdx calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
        callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
    }
    X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(naryNode));
    CCLocInfo ploc;
    for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) {
        BaseNode *argExpr = naryNode.Opnd(i);
        DEBUG_ASSERT(argExpr != nullptr, "not null check");
        PrimType primType = argExpr->GetPrimType();
        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
        bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i));

        Operand *argOpnd = HandleExpr(naryNode, *argExpr);
        DEBUG_ASSERT(argOpnd != nullptr, "not null check");
        MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
        parmLocator.LocateNextParm(*mirType, ploc);

        /* skip unused args */
        if (isArgUnused) {
            continue;
        }

        if (ploc.reg0 != x64::kRinvalid) {
            /* load to the register. */
            RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, GetPrimTypeBitSize(primType),
                                                                           cgFunc->GetRegTyFromPrimTy(primType));
            paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType});
            if (x64::IsFPSIMDRegister(static_cast<X64reg>(ploc.reg0))) {
                fpNum++;
            }
        } else {
            /* load to stack memory */
            RegOperand &baseOpnd =
                cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
            MemOperand &actMemOpnd =
                cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, GetPrimTypeBitSize(primType));
            SelectCopy(actMemOpnd, *argOpnd, primType);
        }
        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NIY");
    }

    /* param pass by reg */
    for (auto [regOpnd, argOpnd, primType] : paramPassByReg) {
        DEBUG_ASSERT(regOpnd != nullptr, "not null check");
        DEBUG_ASSERT(argOpnd != nullptr, "not null check");
        SelectCopy(*regOpnd, *argOpnd, primType);
        srcOpnds.PushOpnd(*regOpnd);
    }
}

RegOperand &X64MPIsel::SelectSpecialRegread(PregIdx pregIdx, PrimType primType)
{
    switch (-pregIdx) {
        case kSregFp: {
            return cgFunc->GetOpndBuilder()->CreatePReg(x64::RFP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
        }
        case kSregSp: {
            return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType));
        }
        default: {
            CHECK_FATAL(false, "ERROR: Not supported special register!");
        }
    }
}

Operand *X64MPIsel::SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const
{
    CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const");
    uint32 labelIdxTmp = cgFunc->GetLabelIdx();
    Operand *result = nullptr;
    if (primType == PTY_f64) {
        result = SelectLiteral(static_cast<MIRDoubleConst &>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
    } else {
        result = SelectLiteral(static_cast<MIRFloatConst &>(floatingConst), cgFunc->GetFunction(), labelIdxTmp++);
    }
    cgFunc->SetLabelIdx(labelIdxTmp);
    return result;
}

Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand &paramOpnds, ListOperand &retOpnds)
{
    Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds);
    cgFunc->GetCurBB()->AppendInsn(callInsn);
    cgFunc->GetCurBB()->SetHasCall();
    cgFunc->GetFunction().SetHasCall();
    return callInsn;
}

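/*
 * Per the x86-64 SysV calling convention, integer return values of up to 128 bits come back
 * in RAX (low 64 bits) and RDX (high 64 bits). The list built here records those registers on
 * the call instruction so later phases (e.g. register allocation) see them as defined by the call.
 */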
void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds)
{
    if (retType == nullptr) {
        return;
    }
    auto retSize = retType->GetSize() * kBitsPerByte;
    if (retSize <= k128BitSize) {
        if (retSize > k0BitSize) {
            retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt));
        }
        if (retSize > k64BitSize) {
            retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt));
        }
    }
}

void X64MPIsel::SelectCall(CallNode &callNode)
{
    MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
    MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false);
    Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym);

    ListOperand &paramOpnds = cgFunc->GetOpndBuilder()->CreateList();
    uint32 fpNum = 0;
    SelectParmList(callNode, paramOpnds, fpNum);
    /* x64 ABI: when calling a variadic function, rax (al) carries the number of vector registers used for args */
    if (fn->IsVarargs()) {
        ImmOperand &fpNumImm = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, fpNum);
        RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt);
        SelectCopy(raxOpnd, fpNumImm, PTY_i64);
    }

    MIRType *retType = fn->GetReturnType();
    ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
    SelectCalleeReturn(retType, retOpnds);

    Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }
    const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = cgFunc->GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    cgFunc->AppendStackMapInsn(callInsn);
}

void X64MPIsel::SelectIcall(IcallNode &iCallNode)
{
    Operand *opnd0 = HandleExpr(iCallNode, *iCallNode.GetNopndAt(0));
    RegOperand &targetOpnd = SelectCopy2Reg(*opnd0, iCallNode.Opnd(0)->GetPrimType());
    ListOperand &paramOpnds = cgFunc->GetOpndBuilder()->CreateList();
    uint32 fpNum = 0;
    SelectParmList(iCallNode, paramOpnds, fpNum);

    MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx());
    if (iCallNode.GetOpCode() == OP_icallproto) {
        CHECK_FATAL((retType->GetKind() == kTypeFunction), "NIY, must be func");
        auto calleeType = static_cast<MIRFuncType *>(retType);
        retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx());
    }
    ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
    SelectCalleeReturn(retType, retOpnds);

    Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds);
    if (retType != nullptr) {
        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
    }
    const auto &deoptBundleInfo = iCallNode.GetDeoptBundleInfo();
    for (const auto &elem : deoptBundleInfo) {
        auto valueKind = elem.second.GetMapleValueKind();
        if (valueKind == MapleValue::kPregKind) {
            auto *opnd = cgFunc->GetOrCreateRegOpndFromPregIdx(elem.second.GetPregIdx(), PTY_ref);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else if (valueKind == MapleValue::kConstKind) {
            auto *opnd = SelectIntConst(static_cast<const MIRIntConst &>(elem.second.GetConstValue()), PTY_i32);
            callInsn.AddDeoptBundleInfo(elem.first, *opnd);
        } else {
            CHECK_FATAL(false, "not supported currently");
        }
    }
    cgFunc->AppendStackMapInsn(callInsn);
}

Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg)
{
    return GetTargetRetOperand(primType, sReg);
}

void X64MPIsel::SelectGoto(GotoNode &stmt)
{
    MOperator mOp = x64::MOP_jmpq_l;
    auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
    LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
    jmpInsn.AddOpndChain(targetOpnd);
    cgFunc->GetCurBB()->SetKind(BB::kBBGoto);
    return;
}

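/*
 * The overflow intrinsics return a pair: the arithmetic result and an overflow indicator.
 * The result is computed with a regular add/sub/mul, and the indicator is then materialized
 * into the second return preg with seto, which reads the overflow flag set by that operation.
 */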
void X64MPIsel::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
{
    DEBUG_ASSERT(intrnNode.NumOpnds() == kOpndNum2, "must be 2 operands");
    MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
    // compute the arithmetic result
    PrimType type = intrnNode.Opnd(0)->GetPrimType();
    PrimType type2 = intrnNode.Opnd(1)->GetPrimType();
    CHECK_FATAL(type == PTY_i32 || type == PTY_u32, "only support i32 or u32 here");
    CHECK_FATAL(type2 == PTY_i32 || type2 == PTY_u32, "only support i32 or u32 here");
    RegOperand &opnd0 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
                                       intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
    RegOperand &opnd1 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
                                       intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */
    RegOperand &resReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), cgFunc->GetRegTyFromPrimTy(type));
    if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
        SelectAdd(resReg, opnd0, opnd1, type);
    } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
        SelectSub(resReg, opnd0, opnd1, type);
    } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
        SelectMpy(resReg, opnd0, opnd1, type);
    } else {
        CHECK_FATAL(false, "niy");
    }

    // store the result and the overflow flag into the return pregs
    auto *p2nrets = &intrnNode.GetReturnVec();
    if (p2nrets->size() == k2ByteSize) {
        CHECK_NULL_FATAL(cgFunc->GetBecommon().GetMIRModule().CurFunction());
        PregIdx pregIdx = (*p2nrets)[0].second.GetPregIdx();
        MIRPreg *mirPreg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
        PrimType regType = mirPreg->GetPrimType();
        RegOperand &retReg = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx),
            GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType));
        SelectCopy(retReg, resReg, type);
        PregIdx pregIdx2 = (*p2nrets)[1].second.GetPregIdx();
        MIRPreg *mirPreg2 = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx2);
        PrimType regType2 = mirPreg2->GetPrimType();
        RegOperand &retReg2 = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx2),
            GetPrimTypeBitSize(regType2), cgFunc->GetRegTyFromPrimTy(regType2));
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_seto_r, X64CG::kMd[MOP_seto_r]);
        insn.AddOpndChain(retReg2);
        cgFunc->GetCurBB()->AppendInsn(insn);
    } else {
        CHECK_FATAL(false, "should not happen");
    }
    return;
}

void X64MPIsel::SelectPureCall(const IntrinsiccallNode &intrnNode)
{
    DEBUG_ASSERT(intrnNode.NumOpnds() == 7, "must be 7 operands"); // must be 7 operands
    ListOperand &srcOpnds = cgFunc->GetOpndBuilder()->CreateList();
    auto &callee = *intrnNode.Opnd(0);
    auto ptyp = callee.GetPrimType();
    RegOperand &calleeReg = SelectCopy2Reg(*HandleExpr(intrnNode, callee), ptyp);
    uint32 i = 1;
    for (; i < kSeventhReg; i++) {
        srcOpnds.PushOpnd(LoadOpndIntoPhysicalRegister(intrnNode, i));
    }
    // R11 is used in asm call
    srcOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::R11, GetPrimTypeBitSize(PTY_i64), kRegTyInt));
    MOperator mOp = x64::MOP_pure_call;
    Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    callInsn.AddOpndChain(calleeReg);
    callInsn.AddOpndChain(srcOpnds);
    cgFunc->GetCurBB()->AppendInsn(callInsn);
    return;
}

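/*
 * Operands 1..6 of the pure-call intrinsic are moved into the first six SysV integer
 * argument registers (RDI, RSI, RDX, RCX, R8, R9), in that order.
 */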
RegOperand &X64MPIsel::LoadOpndIntoPhysicalRegister(const IntrinsiccallNode &intrnNode, uint32 index)
{
    auto &opnd = *intrnNode.Opnd(index);
    auto ptyp = opnd.GetPrimType();
    RegOperand &opndReg = SelectCopy2Reg(*HandleExpr(intrnNode, opnd), ptyp);
    PRegNo regId;
    switch (index - 1) {
        case kFirstReg:
            regId = x64::RDI;
            break;
        case kSecondReg:
            regId = x64::RSI;
            break;
        case kThirdReg:
            regId = x64::RDX;
            break;
        case kFourthReg:
            regId = x64::RCX;
            break;
        case kFifthReg:
            regId = x64::R8;
            break;
        case kSixthReg:
            regId = x64::R9;
            break;
        default:
            CHECK_FATAL_FALSE("Unreachable!");
    }
    RegOperand &realReg = cgFunc->GetOpndBuilder()->CreatePReg(regId, GetPrimTypeBitSize(PTY_i64), kRegTyInt);
    SelectCopy(realReg, opndReg, ptyp, ptyp);
    return realReg;
}

void X64MPIsel::SelectIntrinsicCall(IntrinsiccallNode &intrinsiccallNode)
{
    MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();
    if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW ||
        intrinsic == INTRN_MUL_WITH_OVERFLOW) {
        SelectOverFlowCall(intrinsiccallNode);
        return;
    }
    if (intrinsic == maple::INTRN_JS_PURE_CALL) {
        SelectPureCall(intrinsiccallNode);
        return;
    }

    CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic));
}

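/*
 * rangegoto is lowered to a jump table: a local read-only array of label addresses is emitted,
 * the switch value is normalized to a zero-based index (value - minIdx - tagOffset), the 8-byte
 * table entry at base + index * 8 is loaded, and control transfers with an indirect jmp through
 * that memory operand.
 */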
void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd)
{
    MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64);
    std::vector<uint64> sizeArray;
    const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
    sizeArray.emplace_back(switchTable.size());
    MemPool *memPool = cgFunc->GetMemoryPool();
    MIRArrayType *arrayType = memPool->New<MIRArrayType>(etype->GetTypeIndex(), sizeArray);
    MIRAggConst *arrayConst = memPool->New<MIRAggConst>(cgFunc->GetMirModule(), *arrayType);
    for (const auto &itPair : switchTable) {
        LabelIdx labelIdx = itPair.second;
        cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx);
        MIRConst *mirConst = memPool->New<MIRLblConst>(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype);
        arrayConst->AddItem(mirConst, 0);
    }
    MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal);
    lblSt->SetStorageClass(kScFstatic);
    lblSt->SetSKind(kStConst);
    lblSt->SetTyIdx(arrayType->GetTypeIndex());
    lblSt->SetKonst(arrayConst);
    std::string lblStr(".L_");
    uint32 labelIdxTmp = cgFunc->GetLabelIdx();
    lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++));
    cgFunc->SetLabelIdx(labelIdxTmp);
    lblSt->SetNameStrIdx(lblStr);
    cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt);
    ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0);
    /* get index */
    PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType();
    RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType);
    int32 minIdx = switchTable[0].first;
    ImmOperand &opnd1 =
        cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), -minIdx - rangeGotoNode.GetTagOffset());
    RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt);
    SelectAdd(indexOpnd, opnd0, opnd1, srcType);

    /* load the displacement into a register by accessing memory at base + index * 8 */
    /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */
    MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64));
    RegOperand &baseReg = cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt);
    dstMemOpnd.SetBaseRegister(baseReg);
    dstMemOpnd.SetIndexRegister(indexOpnd);
    dstMemOpnd.SetOffsetOperand(stOpnd);
    dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize));

    /* jump to the absolute address loaded from dstMemOpnd */
    MOperator mOp = x64::MOP_jmpq_m;
    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    jmpInsn.AddOpndChain(dstMemOpnd);
    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
}

/*
 * unordered  ZF, PF, CF ==> 1, 1, 1
 * above      ZF, PF, CF ==> 0, 0, 0
 * below      ZF, PF, CF ==> 0, 0, 1
 * equal      ZF, PF, CF ==> 1, 0, 0
 *
 * Less-than would only check CF = 1 and so cannot be distinguished from unordered (CF = 1);
 * therefore float lt/le is represented by testing gt/ge with the operands swapped.
 */
static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned)
{
    switch (cmpOp) {
        case OP_ne:
            return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l;
        case OP_eq:
            return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l;
        case OP_lt:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jl_l : MOP_jb_l))
                                       : (isSigned ? MOP_jge_l : MOP_jae_l);
        case OP_le:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_jae_l : (isSigned ? MOP_jle_l : MOP_jbe_l))
                                       : (isSigned ? MOP_jg_l : MOP_ja_l);
        case OP_gt:
            return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jg_l : MOP_ja_l))
                                       : (isSigned ? MOP_jle_l : MOP_jbe_l);
        case OP_ge:
            return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) : (isSigned ? MOP_jl_l : MOP_jb_l);
        default:
            CHECK_FATAL(false, "PickJmpInsn error");
    }
}

/*
 * Handle the brfalse/brtrue op; opnd0 can be a compare node or a non-compare node
 * (a dread, for example).
 */
void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0)
{
    Opcode opcode = stmt.GetOpCode();
    X64MOP_t jmpOperator = x64::MOP_begin;
    if (opnd0.IsImmediate()) {
        DEBUG_ASSERT(opnd0.IsIntImmediate(), "only support int immediate");
        DEBUG_ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode");
        ImmOperand &immOpnd0 = static_cast<ImmOperand &>(opnd0);
        if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) ||
            (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) {
            return;
        }
        jmpOperator = x64::MOP_jmpq_l;
        cgFunc->SetCurBBKind(BB::kBBGoto);
    } else {
        PrimType primType;
        Opcode condOpcode = condNode.GetOpCode();
        // non-compare condition: compare it against 0 and branch on OP_ne
        if (!kOpcodeInfo.IsCompare(condOpcode)) {
            primType = condNode.GetPrimType();
            ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0);
            SelectCmp(opnd0, imm0, primType);
            condOpcode = OP_ne;
        } else {
            primType = static_cast<CompareNode &>(condNode).GetOpndType();
        }
        bool isFloat = IsPrimitiveFloat(primType);
        jmpOperator = PickJmpInsn(opcode, condOpcode, isFloat, IsSignedInteger(primType));
        cgFunc->SetCurBBKind(BB::kBBIf);
    }
    /* gen targetOpnd, .L.xxx__xx */
    auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
    LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
    /* select jump Insn */
    Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator]));
    jmpInsn.AddOpndChain(targetOpnd);
    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
}

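/*
 * Return-value pseudo registers map onto the physical return registers: retval0 is RAX, or the
 * first vector register (x64::V0) for floating-point values; retval1 is RDX.
 */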
Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg)
{
    uint32 bitSize = GetPrimTypeBitSize(primType);
    regno_t retReg = 0;
    switch (sReg) {
        case kSregRetval0:
            retReg = IsPrimitiveFloat(primType) ? x64::V0 : x64::RAX;
            break;
        case kSregRetval1:
            retReg = x64::RDX;
            break;
        default:
            CHECK_FATAL(false, "GetTargetRetOperand: NIY");
            break;
    }
    RegOperand &parmRegOpnd =
        cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
    return parmRegOpnd;
}

Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    RegOperand *resOpnd = nullptr;
    resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType());
    SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype);
    return resOpnd;
}

void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    uint32 bitSize = GetPrimTypeBitSize(primType);
    SelectCopy(resOpnd, opnd0, primType);
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType);
    if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
        X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_imulq_r_r
                       : (bitSize == k32BitSize) ? x64::MOP_imull_r_r
                       : (bitSize == k16BitSize) ? x64::MOP_imulw_r_r
                       : x64::MOP_begin;
        CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
        insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
    } else if (IsPrimitiveFloat(primType)) {
        X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_mulfd_r_r
                       : (bitSize == k32BitSize) ? x64::MOP_mulfs_r_r
                       : x64::MOP_begin;
        CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
        insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
    }
}

/*
 * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX), Remainder(EDX)
 * The IDIV instruction performs signed division of EDX:EAX by the contents of a 32-bit register or memory
 * location and stores the quotient in EAX and the remainder in EDX.
 * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the sign
 * of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor.
 * An overflow generates a #DE (divide error) exception, rather than setting the OF flag.
 * To avoid overflow problems, precede this instruction with a CDQ instruction to sign-extend the dividend.
 * CDQ sign-extends EAX into EDX:EAX, which helps avoid overflow problems in signed number arithmetic.
 */
Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType primType = node.GetPrimType();
    Operand *resOpnd = nullptr;
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
    resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
    return resOpnd;
}

Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType primType = node.GetPrimType();
    Operand *resOpnd = nullptr;
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
    resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
    return resOpnd;
}

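/*
 * Integer div/rem share one lowering: the dividend is moved to rax; for signed division
 * rdx:rax is formed with cwd/cdq/cqo, for unsigned division rdx is zeroed; then (i)div leaves
 * the quotient in rax and the remainder in rdx, and the requested value is copied to the result.
 */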
Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode)
{
    DEBUG_ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode");
    if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
        uint32 bitSize = GetPrimTypeBitSize(primType);
        /* copy dividend to eax */
        RegOperand &raxOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(raxOpnd, opnd0, primType);

        RegOperand &rdxOpnd =
            cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        bool isSigned = IsSignedInteger(primType);
        if (isSigned) {
            /* cdq: edx:eax = sign-extend of eax */
            X64MOP_t cvtMOp = (bitSize == k64BitSize) ? x64::MOP_cqo
                              : (bitSize == k32BitSize) ? x64::MOP_cdq
                              : (bitSize == k16BitSize) ? x64::MOP_cwd
                              : x64::MOP_begin;
            CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping");
            Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd);
            cgFunc->GetCurBB()->AppendInsn(cvtInsn);
        } else {
            /* set edx = 0 */
            SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType);
        }
        /* div */
        X64MOP_t divMOp = (bitSize == k64BitSize) ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r)
                          : (bitSize == k32BitSize) ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r)
                          : (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r)
                          : x64::MOP_begin;
        CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping");
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
        /* return */
        RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType);
        return &resOpnd;
    } else if (IsPrimitiveFloat(primType)) {
        uint32 bitSize = GetPrimTypeBitSize(primType);
        auto &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType));
        SelectCopy(resOpnd, opnd0, primType);
        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_divsd_r, opnd1, resOpnd);
        cgFunc->GetCurBB()->AppendInsn(insn);
        return &resOpnd;
    } else {
        CHECK_FATAL(false, "NIY");
    }
}

Operand *X64MPIsel::SelectLnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    RegOperand *resOpnd = nullptr;
    resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
    ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(dtype), 0);
    if (IsPrimitiveFloat(dtype)) {
        SelectCmpFloatEq(*resOpnd, regOpnd0, immOpnd, dtype, dtype);
    } else {
        SelectCmp(regOpnd0, immOpnd, dtype);
        SelectCmpResult(*resOpnd, OP_eq, dtype, dtype);
    }
    return resOpnd;
}

/*
 * unordered  ZF, PF, CF ==> 1, 1, 1
 * above      ZF, PF, CF ==> 0, 0, 0
 * below      ZF, PF, CF ==> 0, 0, 1
 * equal      ZF, PF, CF ==> 1, 0, 0
 *
 * Less-than would only check CF = 1 and so cannot be distinguished from unordered (CF = 1);
 * therefore float lt/le is replaced by testing gt/ge with the operands swapped.
 *
 * Float eq uses cmpeqsd, the same as llvm.
 */
Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    PrimType primOpndType = node.GetOpndType();
    RegOperand *resOpnd = nullptr;
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType());
    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType());
    resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
    auto nodeOp = node.GetOpCode();
    Opcode parentOp = parent.GetOpCode();
    bool isFloat = IsPrimitiveFloat(primOpndType);
    bool isJump = (parentOp == OP_brfalse || parentOp == OP_brtrue);
    // float eq
    if (isFloat && (nodeOp == maple::OP_eq) && (!isJump)) {
        SelectCmpFloatEq(*resOpnd, regOpnd0, regOpnd1, dtype, primOpndType);
        return resOpnd;
    }

    bool isSwap = (isFloat && (nodeOp == maple::OP_le || nodeOp == maple::OP_lt) && (parentOp != OP_brfalse));
    SelectCmp(regOpnd0, regOpnd1, primOpndType, isSwap);
    if (isJump) {
        return resOpnd;
    }
    SelectCmpResult(*resOpnd, nodeOp, dtype, primOpndType);
    return resOpnd;
}

void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType, bool isSwap)
{
    x64::X64MOP_t cmpMOp = x64::MOP_begin;
    if (IsPrimitiveInteger(primType)) {
        cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType);
    } else if (IsPrimitiveFloat(primType)) {
        cmpMOp = x64::MOP_ucomisd_r_r;
    } else {
        CHECK_FATAL(false, "NIY");
    }
    DEBUG_ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp");
    Insn &cmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp]));
    if (isSwap) {
        cmpInsn.AddOpndChain(opnd0).AddOpndChain(opnd1);
    } else {
        cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0);
    }
    cgFunc->GetCurBB()->AppendInsn(cmpInsn);
}

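/*
 * cmpeqsd writes an all-ones mask into the destination xmm register when the operands compare
 * equal (and are ordered), and all-zeros otherwise. The mask is then moved bit-for-bit into a
 * general-purpose register and converted to the requested result type, so "equal" yields a
 * non-zero value and "not equal" yields zero.
 */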
void X64MPIsel::SelectCmpFloatEq(RegOperand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primResType,
                                 PrimType primOpndType)
{
    /* float eq uses cmpeqsd, the same as llvm */
    x64::X64MOP_t eqMOp = x64::MOP_cmpeqsd_r_r;
    Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(eqMOp, X64CG::kMd[eqMOp]);

    auto &regOpnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primOpndType),
                                                          cgFunc->GetRegTyFromPrimTy(primOpndType));
    SelectCopy(regOpnd1, opnd1, primOpndType);
    /* CMPEQSD xmm1, xmm2 => CMPSD xmm1, xmm2, 0 */
    setInsn.AddOpndChain(opnd0).AddOpndChain(regOpnd1);
    cgFunc->GetCurBB()->AppendInsn(setInsn);

    /* set result -> u64/u32 */
    auto tmpResType = (primOpndType == maple::PTY_f64) ? PTY_u64 : PTY_u32;
    RegOperand &tmpResOpnd =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(tmpResType), cgFunc->GetRegTyFromPrimTy(tmpResType));
    SelectRetypeFloat(tmpResOpnd, regOpnd1, tmpResType, primOpndType);
    /* cvt u64/u32 -> primType */
    SelectIntCvt(resOpnd, tmpResOpnd, primResType, tmpResType);
}

void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType)
{
    bool isFloat = IsPrimitiveFloat(primOpndType);
    bool isSigned = (!IsPrimitiveUnsigned(primOpndType) && !IsPrimitiveFloat(primOpndType));
    /* set result -> u8 */
    RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k8BitSize, cgFunc->GetRegTyFromPrimTy(PTY_u8));
    x64::X64MOP_t setMOp = GetSetCCMop(opCode, tmpResOpnd.GetKind(), isSigned, isFloat);
    DEBUG_ASSERT(setMOp != x64::MOP_begin, "unsupported mOp");
    Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(setMOp, X64CG::kMd[setMOp]);
    setInsn.AddOpndChain(tmpResOpnd);
    cgFunc->GetCurBB()->AppendInsn(setInsn);
    /* cvt u8 -> primType */
    SelectIntCvt(resOpnd, tmpResOpnd, primType, PTY_u8);
}

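/*
 * select (cond ? trueOpnd : falseOpnd) is lowered branchlessly with cmovcc: the result register
 * is first initialized with the false value and then conditionally overwritten with the true
 * value when the comparison condition holds.
 */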
void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
                             Opcode cmpOpcode, PrimType cmpPrimType)
{
    CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY");
    bool isSigned = !IsPrimitiveUnsigned(primType);
    uint32 bitSize = GetPrimTypeBitSize(primType);
    if (bitSize == k8BitSize) {
        /* cmov does not support 8-bit operands, so widen to 32 bits */
        PrimType cvtType = isSigned ? PTY_i32 : PTY_u32;
        RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt);
        Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType);
        Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType);
        SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType);
        SelectCopy(resOpnd, tmpResOpnd, primType, cvtType);
        return;
    }
    RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType);
    SelectCopy(resOpnd, falseOpnd, primType);
    x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType));
    DEBUG_ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp");
    Insn &comvInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]);
    comvInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd);
    cgFunc->GetCurBB()->AppendInsn(comvInsn);
}

void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
{
    if (IsPrimitiveInteger(primType)) {
        SelectCmp(opnd0, opnd1, primType);
        Opcode cmpOpcode = isMin ? OP_lt : OP_gt;
        SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType);
    } else {
        // float lt/le would need to swap the operands and use seta
        CHECK_FATAL(false, "NIY type max or min");
    }
}

Operand *X64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
    PrimType origPrimType = node.Opnd(0)->GetPrimType();
    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);

    MOperator mopBsf = x64::MOP_bsfl_r_r;
    Insn &bsfInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsf, X64CG::kMd[mopBsf]);
    bsfInsn.AddOpndChain(opnd).AddOpndChain(opnd);
    cgFunc->GetCurBB()->AppendInsn(bsfInsn);

    PrimType retType = node.GetPrimType();
    RegOperand &destReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
    // ctz i32 (u32) => cvt u32 -> i32
    // ctz i32 (u64) => cvt u64 -> i32
    SelectIntCvt(destReg, opnd, retType, origPrimType);
    return &destReg;
}

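/*
 * clz32(x) is computed as 31 - bsr(x): bsr yields the index of the highest set bit, and the
 * neg plus add-of-31 turn that into the leading-zero count. bsr leaves its destination undefined
 * when the input is 0, so cmove substitutes -1 in that case, making the result 31 - (-1) = 32.
 */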
Operand *X64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
{
    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
    CHECK_FATAL(node.GetIntrinsic() == INTRN_C_clz32, "only support clz32");
    PrimType origPrimType = node.Opnd(0)->GetPrimType();
    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
    // bsr opnd tmp2
    ImmOperand &imm =
        cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType), -1);
    RegOperand &tmp1 = SelectCopy2Reg(imm, origPrimType);
    RegOperand &tmp2 =
        cgFunc->GetOpndBuilder()->CreateVReg(
            GetPrimTypeBitSize(origPrimType), cgFunc->GetRegTyFromPrimTy(origPrimType));
    MOperator mopBsr = x64::MOP_bsrl_r_r;
    Insn &bsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsr, X64CG::kMd[mopBsr]);
    bsrInsn.AddOpndChain(opnd).AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(bsrInsn);
    // cmove -1, tmp2
    MOperator mopComv = x64::MOP_cmovel_r_r;
    Insn &cmovInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopComv, X64CG::kMd[mopComv]);
    cmovInsn.AddOpndChain(tmp1).AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(cmovInsn);
    // neg tmp2
    MOperator mopNeg = x64::MOP_negl_r;
    Insn &negInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopNeg, X64CG::kMd[mopNeg]);
    negInsn.AddOpndChain(tmp2);
    cgFunc->GetCurBB()->AppendInsn(negInsn);
    // add res 31 tmp2
    ImmOperand &imm2 =
        cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType), k32BitSize - 1);
    RegOperand &tmp3 =
        cgFunc->GetOpndBuilder()->CreateVReg(
            GetPrimTypeBitSize(origPrimType), cgFunc->GetRegTyFromPrimTy(origPrimType));
    SelectAdd(tmp3, imm2, tmp2, origPrimType);
    PrimType retType = node.GetPrimType();
    RegOperand &destReg =
        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType), cgFunc->GetRegTyFromPrimTy(retType));
    SelectIntCvt(destReg, tmp3, retType, origPrimType);
    return &destReg;
}

RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType)
{
    return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType),
                                                cgFunc->GetRegTyFromPrimTy(primType));
}

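/*
 * Retype between floating-point and integer registers is a pure bit move: movd/movq transfer
 * 32/64 bits between a general-purpose register and an xmm register without any value conversion,
 * with the direction chosen by whether the source type is floating point.
 */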
void X64MPIsel::SelectRetypeFloat(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType)
{
    uint32 fromSize = GetPrimTypeBitSize(fromType);
    [[maybe_unused]] uint32 toSize = GetPrimTypeBitSize(toType);
    DEBUG_ASSERT(fromSize == toSize, "retype bit width doesn't match");
    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
    MOperator mOp = x64::MOP_begin;
    if (fromSize == k32BitSize) {
        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movd_fr_r : x64::MOP_movd_r_fr;
    } else if (fromSize == k64BitSize) {
        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movq_fr_r : x64::MOP_movq_r_fr;
    } else {
        CHECK_FATAL(false, "niy");
    }
    CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    (void)insn.AddOpndChain(regOpnd0).AddOpndChain(resOpnd);
    cgFunc->GetCurBB()->AppendInsn(insn);
    return;
}

Operand *X64MPIsel::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
{
    PrimType dtype = node.GetPrimType();
    if (!IsPrimitiveFloat(dtype)) {
        DEBUG_ASSERT(false, "should be float type");
        return nullptr;
    }
    auto bitSize = GetPrimTypeBitSize(dtype);
    MOperator mOp = x64::MOP_begin;
    if (bitSize == k64BitSize) {
        mOp = MOP_sqrtd_r_r;
    } else if (bitSize == k32BitSize) {
        mOp = MOP_sqrts_r_r;
    } else {
        CHECK_FATAL(false, "niy");
    }
    RegOperand &regOpnd0 = SelectCopy2Reg(src, dtype);
    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
    Operand &retReg = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(dtype));

    (void)insn.AddOpndChain(regOpnd0).AddOpndChain(retReg);
    cgFunc->GetCurBB()->AppendInsn(insn);
    return &retReg;
}
} // namespace maplebe