Lines Matching refs:MVT in llvm/lib/Target/AArch64/AArch64FastISel.cpp
139 bool isTypeLegal(Type *Ty, MVT &VT);
140 bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
144 bool simplifyAddress(Address &Addr, MVT VT);
153 bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
158 unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
161 unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
164 unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
167 unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
172 unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
181 bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
182 bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
183 bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
184 unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
186 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
188 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
189 unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
190 unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
193 unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
194 unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
197 unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
199 unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
203 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
205 unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
207 unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
210 unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
211 unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
213 unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
215 unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
217 unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
219 unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
221 unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
223 unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
225 unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
227 unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
230 unsigned materializeInt(const ConstantInt *CI, MVT VT);
231 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
237 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
239 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
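The declarations above (file lines 139-239) follow this file's standard shape: a select* entry point legalizes the IR type into an MVT via isTypeLegal/isTypeSupported, then dispatches to an emit* helper. A minimal sketch of that guard, assuming the surrounding AArch64FastISel context (the selectExample name is ours):

    // Sketch only: the usual type guard before any emit* helper runs.
    bool AArch64FastISel::selectExample(const Instruction *I) {
      MVT VT;
      if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
        return false; // bail out; SelectionDAG handles this instruction
      // ... dispatch on VT to one of the emit* helpers declared above ...
      return true;
    }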
285 static unsigned getImplicitScaleFactor(MVT VT) { in getImplicitScaleFactor()
289 case MVT::i1: // fall-through in getImplicitScaleFactor()
290 case MVT::i8: in getImplicitScaleFactor()
292 case MVT::i16: in getImplicitScaleFactor()
294 case MVT::i32: // fall-through in getImplicitScaleFactor()
295 case MVT::f32: in getImplicitScaleFactor()
297 case MVT::i64: // fall-through in getImplicitScaleFactor()
298 case MVT::f64: in getImplicitScaleFactor()
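From the matched case labels, getImplicitScaleFactor (file line 285) reconstructs to the switch below; a sketch inferred from the labels, returning the access width in bytes by which loads and stores implicitly scale offsets (0 marks an unsupported type):

    static unsigned getImplicitScaleFactor(MVT VT) {
      switch (VT.SimpleTy) {
      default:
        return 0; // invalid scale factor
      case MVT::i1: // fall-through
      case MVT::i8:
        return 1;
      case MVT::i16:
        return 2;
      case MVT::i32: // fall-through
      case MVT::f32:
        return 4;
      case MVT::i64: // fall-through
      case MVT::f64:
        return 8;
      }
    }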
312 assert(TLI.getValueType(AI->getType(), true) == MVT::i64 && in fastMaterializeAlloca()
335 unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) { in materializeInt()
336 if (VT > MVT::i64) in materializeInt()
343 const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass in materializeInt()
345 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in materializeInt()
352 unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) { in materializeFP()
358 if (VT != MVT::f32 && VT != MVT::f64) in materializeFP()
362 bool Is64Bit = (VT == MVT::f64); in materializeFP()
466 MVT VT = CEVT.getSimpleVT(); in fastMaterializeConstant()
481 MVT VT; in fastMaterializeFloatZero()
485 if (VT != MVT::f32 && VT != MVT::f64) in fastMaterializeFloatZero()
488 bool Is64Bit = (VT == MVT::f64); in fastMaterializeFloatZero()
700 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill, in computeAddress()
797 Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill, in computeAddress()
904 bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) { in isTypeLegal()
908 if (evt == MVT::Other || !evt.isSimple()) in isTypeLegal()
913 if (VT == MVT::f128) in isTypeLegal()
925 bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) { in isTypeSupported()
934 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) in isTypeSupported()
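File lines 904-934 outline the two type predicates: isTypeLegal maps an IR Type to a simple MVT and rejects f128, while isTypeSupported additionally accepts i1/i8/i16 (which get promoted) and, optionally, vectors. A sketch of isTypeLegal assembled from those matches, assuming the pre-DataLayout TLI.getValueType signature visible at file line 312:

    bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
      EVT evt = TLI.getValueType(Ty, /*AllowUnknown=*/true);
      if (evt == MVT::Other || !evt.isSimple())
        return false;
      VT = evt.getSimpleVT();
      // f128 is a legal type, but not one this FastISel handles.
      if (VT == MVT::f128)
        return false;
      return TLI.isTypeLegal(VT);
    }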
951 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) { in simplifyAddress()
995 ResultReg = emitAddSub_rx(/*UseAdd=*/true, MVT::i64, Addr.getReg(), in simplifyAddress()
1000 ResultReg = emitAddSub_rs(/*UseAdd=*/true, MVT::i64, Addr.getReg(), in simplifyAddress()
1006 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(), in simplifyAddress()
1010 ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(), in simplifyAddress()
1014 ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(), in simplifyAddress()
1032 ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset); in simplifyAddress()
1034 ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset); in simplifyAddress()
1084 unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS, in emitAddSub()
1092 case MVT::i1: in emitAddSub()
1095 case MVT::i8: in emitAddSub()
1099 case MVT::i16: in emitAddSub()
1103 case MVT::i32: // fall-through in emitAddSub()
1104 case MVT::i64: in emitAddSub()
1107 MVT SrcVT = RetVT; in emitAddSub()
1108 RetVT.SimpleTy = std::max(RetVT.SimpleTy, MVT::i32); in emitAddSub()
1231 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rr()
1237 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rr()
1246 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rr()
1265 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_ri()
1270 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_ri()
1288 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_ri()
1310 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rs()
1318 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rs()
1327 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rs()
1347 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg, in emitAddSub_rx()
1355 if (RetVT != MVT::i32 && RetVT != MVT::i64) in emitAddSub_rx()
1364 bool Is64Bit = RetVT == MVT::i64; in emitAddSub_rx()
1392 MVT VT = EVT.getSimpleVT(); in emitCmp()
1397 case MVT::i1: in emitCmp()
1398 case MVT::i8: in emitCmp()
1399 case MVT::i16: in emitCmp()
1400 case MVT::i32: in emitCmp()
1401 case MVT::i64: in emitCmp()
1403 case MVT::f32: in emitCmp()
1404 case MVT::f64: in emitCmp()
1409 bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, in emitICmp()
1415 bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, in emitICmp_ri()
1421 bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) { in emitFCmp()
1422 if (RetVT != MVT::f32 && RetVT != MVT::f64) in emitFCmp()
1438 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri; in emitFCmp()
1449 unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr; in emitFCmp()
1456 unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS, in emitAdd()
1467 unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, in emitAdd_ri_()
1486 unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS, in emitSub()
1492 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg, in emitSubs_rr()
1499 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg, in emitSubs_rs()
1509 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT, in emitLogicalOp()
1578 MVT VT = std::max(MVT::i32, RetVT.SimpleTy); in emitLogicalOp()
1580 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) { in emitLogicalOp()
1581 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp()
1582 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp()
1587 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, in emitLogicalOp_ri()
1603 case MVT::i1: in emitLogicalOp_ri()
1604 case MVT::i8: in emitLogicalOp_ri()
1605 case MVT::i16: in emitLogicalOp_ri()
1606 case MVT::i32: { in emitLogicalOp_ri()
1613 case MVT::i64: in emitLogicalOp_ri()
1626 if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) { in emitLogicalOp_ri()
1627 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp_ri()
1628 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp_ri()
1633 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, in emitLogicalOp_rs()
1649 case MVT::i1: in emitLogicalOp_rs()
1650 case MVT::i8: in emitLogicalOp_rs()
1651 case MVT::i16: in emitLogicalOp_rs()
1652 case MVT::i32: in emitLogicalOp_rs()
1656 case MVT::i64: in emitLogicalOp_rs()
1664 if (RetVT >= MVT::i8 && RetVT <= MVT::i16) { in emitLogicalOp_rs()
1665 uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff; in emitLogicalOp_rs()
1666 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLogicalOp_rs()
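The range checks at file lines 1580, 1626, and 1664 depend on MVT's ordering: MVT wraps the SimpleValueType enum, whose scalar integer types are declared in increasing-size order, so >=/<= comparisons select exactly i8 and i16. Those are the types computed in an i32 register and then masked back down, as the hypothetical helper below summarizes (the name is ours):

    static bool needsSubRegMask(MVT VT) {
      // i8/i16 ops are performed in i32, then masked with 0xff/0xffff.
      return VT >= MVT::i8 && VT <= MVT::i16;
    }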
1671 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, in emitAnd_ri()
1676 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr, in emitLoad()
1749 bool IsRet64Bit = RetVT == MVT::i64; in emitLoad()
1753 case MVT::i1: // Intentional fall-through. in emitLoad()
1754 case MVT::i8: in emitLoad()
1759 case MVT::i16: in emitLoad()
1764 case MVT::i32: in emitLoad()
1769 case MVT::i64: in emitLoad()
1773 case MVT::f32: in emitLoad()
1777 case MVT::f64: in emitLoad()
1790 if (VT == MVT::i1) { in emitLoad()
1791 unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1); in emitLoad()
1798 if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) { in emitLoad()
1811 MVT VT; in selectAddSub()
1837 MVT VT; in selectLogicalOp()
1866 MVT VT; in selectLoad()
1881 MVT RetVT = VT; in selectLoad()
1922 if (RetVT == MVT::i64 && VT <= MVT::i32) { in selectLoad()
1928 ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg, in selectLoad()
1960 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr, in emitStore()
2000 case MVT::i1: VTIsi1 = true; in emitStore()
2001 case MVT::i8: Opc = OpcTable[Idx][0]; break; in emitStore()
2002 case MVT::i16: Opc = OpcTable[Idx][1]; break; in emitStore()
2003 case MVT::i32: Opc = OpcTable[Idx][2]; break; in emitStore()
2004 case MVT::i64: Opc = OpcTable[Idx][3]; break; in emitStore()
2005 case MVT::f32: Opc = OpcTable[Idx][4]; break; in emitStore()
2006 case MVT::f64: Opc = OpcTable[Idx][5]; break; in emitStore()
2011 unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1); in emitStore()
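File lines 2000 and 2011 show the i1 store path: the case falls through to the i8 opcode, but the source is first masked to its low bit so only 0 or 1 reaches memory. A standalone illustration of the value-level effect (plain C++, not the FastISel API):

    #include <cstdint>

    // What the AND with #1 before the byte store guarantees.
    uint8_t storedI1Byte(uint32_t srcReg) {
      return (uint8_t)(srcReg & 1);
    }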
2026 MVT VT; in selectStore()
2040 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
2043 VT = MVT::getIntegerVT(VT.getSizeInBits()); in selectStore()
2044 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
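File lines 2040-2044 show that selectStore never materializes a zero: integer zero stores read XZR/WZR directly, and a floating-point +0.0 is first retyped to the same-width integer so the zero register applies. A sketch of that path under those assumptions (the Op0 name and the surrounding checks are assumed from context):

    // Sketch: reuse the integer zero register for a positive FP zero.
    if (const auto *CF = dyn_cast<ConstantFP>(Op0))
      if (CF->isZero() && !CF->isNegative()) {
        VT = MVT::getIntegerVT(VT.getSizeInBits()); // f32->i32, f64->i64
        SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
      }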
2119 MVT VT; in emitCompareAndBranch()
2165 if (VT == MVT::i1) in emitCompareAndBranch()
2212 SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill, in emitCompareAndBranch()
2216 SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*IsZExt=*/true); in emitCompareAndBranch()
2320 MVT SrcVT; in selectBranch()
2329 if (SrcVT == MVT::i64) { in selectBranch()
2330 CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill, in selectBranch()
2335 unsigned ANDReg = emitAnd_ri(MVT::i32, CondReg, CondIsKill, 1); in selectBranch()
2337 emitICmp_ri(MVT::i32, ANDReg, /*IsKill=*/true, 0); in selectBranch()
2405 emitICmp_ri(MVT::i32, CondReg, CondRegIsKill, 0); in selectBranch()
2461 ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1); in selectCmp()
2573 Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1); in optimizeSelect()
2584 MVT VT; in selectSelect()
2593 case MVT::i1: in selectSelect()
2594 case MVT::i8: in selectSelect()
2595 case MVT::i16: in selectSelect()
2596 case MVT::i32: in selectSelect()
2600 case MVT::i64: in selectSelect()
2604 case MVT::f32: in selectSelect()
2608 case MVT::f64: in selectSelect()
2743 MVT DestVT; in selectFPToInt()
2752 if (SrcVT == MVT::f128) in selectFPToInt()
2756 if (SrcVT == MVT::f64) { in selectFPToInt()
2758 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr; in selectFPToInt()
2760 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr; in selectFPToInt()
2763 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr; in selectFPToInt()
2765 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr; in selectFPToInt()
2768 DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass); in selectFPToInt()
2776 MVT DestVT; in selectIntToFP()
2779 assert ((DestVT == MVT::f32 || DestVT == MVT::f64) && in selectIntToFP()
2790 if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) { in selectIntToFP()
2792 emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed); in selectIntToFP()
2799 if (SrcVT == MVT::i64) { in selectIntToFP()
2801 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri; in selectIntToFP()
2803 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri; in selectIntToFP()
2806 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri; in selectIntToFP()
2808 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri; in selectIntToFP()
2850 MVT VT = ArgVT.getSimpleVT().SimpleTy; in fastLowerArguments()
2858 if (VT >= MVT::i1 && VT <= MVT::i64) in fastLowerArguments()
2860 else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() || in fastLowerArguments()
2888 MVT VT = TLI.getSimpleValueType(Arg.getType()); in fastLowerArguments()
2891 if (VT >= MVT::i1 && VT <= MVT::i32) { in fastLowerArguments()
2894 VT = MVT::i32; in fastLowerArguments()
2895 } else if (VT == MVT::i64) { in fastLowerArguments()
2898 } else if (VT == MVT::f16) { in fastLowerArguments()
2901 } else if (VT == MVT::f32) { in fastLowerArguments()
2904 } else if ((VT == MVT::f64) || VT.is64BitVector()) { in fastLowerArguments()
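File lines 2888-2904 of fastLowerArguments pick one register class per argument type. A hypothetical helper (our name; 128-bit vector arguments omitted, since the real code handles them separately) condensing that mapping:

    static const TargetRegisterClass *argRegClass(MVT VT) {
      if (VT >= MVT::i1 && VT <= MVT::i32)
        return &AArch64::GPR32RegClass; // small ints promoted to i32
      if (VT == MVT::i64)
        return &AArch64::GPR64RegClass;
      if (VT == MVT::f16)
        return &AArch64::FPR16RegClass;
      if (VT == MVT::f32)
        return &AArch64::FPR32RegClass;
      if (VT == MVT::f64 || VT.is64BitVector())
        return &AArch64::FPR64RegClass;
      return nullptr; // 128-bit vectors handled separately
    }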
2927 SmallVectorImpl<MVT> &OutVTs, in processCallArgs()
2946 MVT ArgVT = OutVTs[VA.getValNo()]; in processCallArgs()
2957 MVT DestVT = VA.getLocVT(); in processCallArgs()
2958 MVT SrcVT = ArgVT; in processCallArgs()
2967 MVT DestVT = VA.getLocVT(); in processCallArgs()
2968 MVT SrcVT = ArgVT; in processCallArgs()
3017 bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT, in finishCall()
3027 if (RetVT != MVT::isVoid) { in finishCall()
3037 MVT CopyVT = RVLocs[0].getValVT(); in finishCall()
3084 MVT RetVT; in fastLowerCall()
3086 RetVT = MVT::isVoid; in fastLowerCall()
3095 SmallVector<MVT, 16> OutVTs; in fastLowerCall()
3099 MVT VT; in fastLowerCall()
3101 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) in fastLowerCall()
3193 MVT VT; in tryEmitSmallMemCpy()
3196 VT = MVT::i64; in tryEmitSmallMemCpy()
3198 VT = MVT::i32; in tryEmitSmallMemCpy()
3200 VT = MVT::i16; in tryEmitSmallMemCpy()
3202 VT = MVT::i8; in tryEmitSmallMemCpy()
3207 VT = MVT::i32; in tryEmitSmallMemCpy()
3209 VT = MVT::i16; in tryEmitSmallMemCpy()
3211 VT = MVT::i8; in tryEmitSmallMemCpy()
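File lines 3193-3211 implement tryEmitSmallMemCpy's chunk ladder twice: once when alignment permits wide accesses (i64 down to i8, lines 3196-3202) and once capped at i32 for under-aligned copies (3207-3211). The size selection alone, as a standalone sketch (the alignment check is assumed):

    #include <cstdint>

    // Widest chunk the remaining length allows (alignment handling omitted).
    static uint64_t memcpyChunkBytes(uint64_t Len) {
      if (Len >= 8) return 8; // MVT::i64
      if (Len >= 4) return 4; // MVT::i32
      if (Len >= 2) return 2; // MVT::i16
      return 1;               // MVT::i8
    }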
3247 MVT RetVT; in foldXALUIntrinsic()
3254 if (RetVT != MVT::i32 && RetVT != MVT::i64) in foldXALUIntrinsic()
3411 MVT RetVT; in fastLowerIntrinsicCall()
3415 if (RetVT != MVT::f32 && RetVT != MVT::f64) in fastLowerIntrinsicCall()
3424 bool Is64Bit = RetVT == MVT::f64; in fastLowerIntrinsicCall()
3459 MVT VT; in fastLowerIntrinsicCall()
3467 case MVT::f32: in fastLowerIntrinsicCall()
3470 case MVT::f64: in fastLowerIntrinsicCall()
3492 MVT VT; in fastLowerIntrinsicCall()
3519 MVT VT; in fastLowerIntrinsicCall()
3523 if (VT != MVT::i32 && VT != MVT::i64) in fastLowerIntrinsicCall()
3586 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3587 MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill); in fastLowerIntrinsicCall()
3588 unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg, in fastLowerIntrinsicCall()
3597 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
3618 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3619 MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill); in fastLowerIntrinsicCall()
3620 emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg, in fastLowerIntrinsicCall()
3626 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
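File lines 3586-3626 lower the 32-bit [su]mul.with.overflow intrinsics through a 64-bit SMULL/UMULL followed by a shifted compare. What those sequences test, as plain C++ (illustration only, not the FastISel API):

    #include <cstdint>

    // smul.with.overflow i32: overflow iff the full 64-bit product is not
    // the sign-extension of its low 32 bits (SMULL + compare, lines 3587-3588).
    bool smulOverflows(int32_t a, int32_t b) {
      int64_t p = (int64_t)a * (int64_t)b;
      return p != (int64_t)(int32_t)p;
    }

    // umul.with.overflow i32: overflow iff any high bit is set
    // (UMULL + SUBS against the product shifted right by 32, lines 3619-3620).
    bool umulOverflows(uint32_t a, uint32_t b) {
      uint64_t p = (uint64_t)a * (uint64_t)b;
      return (p >> 32) != 0;
    }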
3716 MVT RVVT = RVEVT.getSimpleVT(); in selectRet()
3717 if (RVVT == MVT::f128) in selectRet()
3720 MVT DestVT = VA.getValVT(); in selectRet()
3723 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) in selectRet()
3762 MVT SrcVT = SrcEVT.getSimpleVT(); in selectTrunc()
3763 MVT DestVT = DestEVT.getSimpleVT(); in selectTrunc()
3765 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 && in selectTrunc()
3766 SrcVT != MVT::i8) in selectTrunc()
3768 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 && in selectTrunc()
3769 DestVT != MVT::i1) in selectTrunc()
3783 if (SrcVT == MVT::i64) { in selectTrunc()
3789 case MVT::i1: in selectTrunc()
3792 case MVT::i8: in selectTrunc()
3795 case MVT::i16: in selectTrunc()
3800 unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill, in selectTrunc()
3803 ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask); in selectTrunc()
3816 unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) { in emiti1Ext()
3817 assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 || in emiti1Ext()
3818 DestVT == MVT::i64) && in emiti1Ext()
3821 if (DestVT == MVT::i8 || DestVT == MVT::i16) in emiti1Ext()
3822 DestVT = MVT::i32; in emiti1Ext()
3825 unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1); in emiti1Ext()
3827 if (DestVT == MVT::i64) { in emiti1Ext()
3840 if (DestVT == MVT::i64) { in emiti1Ext()
3849 unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitMul_rr()
3854 case MVT::i8: in emitMul_rr()
3855 case MVT::i16: in emitMul_rr()
3856 case MVT::i32: in emitMul_rr()
3857 RetVT = MVT::i32; in emitMul_rr()
3859 case MVT::i64: in emitMul_rr()
3864 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitMul_rr()
3869 unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitSMULL_rr()
3871 if (RetVT != MVT::i64) in emitSMULL_rr()
3879 unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, in emitUMULL_rr()
3881 if (RetVT != MVT::i64) in emitUMULL_rr()
3889 unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitLSL_rr()
3896 case MVT::i8: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff; break; in emitLSL_rr()
3897 case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break; in emitLSL_rr()
3898 case MVT::i32: Opc = AArch64::LSLVWr; break; in emitLSL_rr()
3899 case MVT::i64: Opc = AArch64::LSLVXr; break; in emitLSL_rr()
3903 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitLSL_rr()
3905 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitLSL_rr()
3911 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLSL_rr()
3915 unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitLSL_ri()
3920 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitLSL_ri()
3921 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitLSL_ri()
3923 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitLSL_ri()
3924 RetVT == MVT::i64) && "Unexpected return value type."); in emitLSL_ri()
3926 bool Is64Bit = (RetVT == MVT::i64); in emitLSL_ri()
3982 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitLSL_ri()
3995 unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitLSR_rr()
4002 case MVT::i8: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff; break; in emitLSR_rr()
4003 case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break; in emitLSR_rr()
4004 case MVT::i32: Opc = AArch64::LSRVWr; break; in emitLSR_rr()
4005 case MVT::i64: Opc = AArch64::LSRVXr; break; in emitLSR_rr()
4009 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitLSR_rr()
4011 Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask); in emitLSR_rr()
4012 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitLSR_rr()
4018 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitLSR_rr()
4022 unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitLSR_ri()
4027 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitLSR_ri()
4028 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitLSR_ri()
4030 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitLSR_ri()
4031 RetVT == MVT::i64) && "Unexpected return value type."); in emitLSR_ri()
4033 bool Is64Bit = (RetVT == MVT::i64); in emitLSR_ri()
4103 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitLSR_ri()
4116 unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, in emitASR_rr()
4123 case MVT::i8: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff; break; in emitASR_rr()
4124 case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break; in emitASR_rr()
4125 case MVT::i32: Opc = AArch64::ASRVWr; break; in emitASR_rr()
4126 case MVT::i64: Opc = AArch64::ASRVXr; break; in emitASR_rr()
4130 (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitASR_rr()
4132 Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*IsZExt=*/false); in emitASR_rr()
4133 Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); in emitASR_rr()
4139 ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); in emitASR_rr()
4143 unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, in emitASR_ri()
4148 assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || in emitASR_ri()
4149 SrcVT == MVT::i32 || SrcVT == MVT::i64) && in emitASR_ri()
4151 assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 || in emitASR_ri()
4152 RetVT == MVT::i64) && "Unexpected return value type."); in emitASR_ri()
4154 bool Is64Bit = (RetVT == MVT::i64); in emitASR_ri()
4212 if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) { in emitASR_ri()
4225 unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, in emitIntExt()
4227 assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?"); in emitIntExt()
4233 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && in emitIntExt()
4234 (DestVT != MVT::i32) && (DestVT != MVT::i64)) || in emitIntExt()
4235 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && in emitIntExt()
4236 (SrcVT != MVT::i16) && (SrcVT != MVT::i32))) in emitIntExt()
4245 case MVT::i1: in emitIntExt()
4247 case MVT::i8: in emitIntExt()
4248 if (DestVT == MVT::i64) in emitIntExt()
4254 case MVT::i16: in emitIntExt()
4255 if (DestVT == MVT::i64) in emitIntExt()
4261 case MVT::i32: in emitIntExt()
4262 assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?"); in emitIntExt()
4269 if (DestVT == MVT::i8 || DestVT == MVT::i16) in emitIntExt()
4270 DestVT = MVT::i32; in emitIntExt()
4271 else if (DestVT == MVT::i64) { in emitIntExt()
4282 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in emitIntExt()
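File lines 4245-4262 pick emitIntExt's opcode and width per source type; on AArch64 these integer extensions are bitfield moves (UBFM/SBFM, an assumption from the target, not visible in the matches). What such extensions compute, as a standalone illustration:

    #include <cstdint>

    // Zero-extension: keep the low 'bits' bits.
    uint64_t zextBits(uint64_t v, unsigned bits) {
      return bits >= 64 ? v : (v & ((1ULL << bits) - 1));
    }

    // Sign-extension via the xor/subtract trick.
    int64_t sextBits(uint64_t v, unsigned bits) {
      uint64_t m = 1ULL << (bits - 1);
      return (int64_t)((zextBits(v, bits) ^ m) - m);
    }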
4334 bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT, in optimizeIntExtLoad()
4335 MVT SrcVT) { in optimizeIntExtLoad()
4363 if (RetVT != MVT::i64 || SrcVT > MVT::i32) { in optimizeIntExtLoad()
4390 MVT RetVT; in selectIntExt()
4391 MVT SrcVT; in selectIntExt()
4411 if (RetVT == MVT::i64 && SrcVT != MVT::i64) { in selectIntExt()
4446 MVT DestVT = DestEVT.getSimpleVT(); in selectRem()
4447 if (DestVT != MVT::i64 && DestVT != MVT::i32) in selectRem()
4451 bool Is64bit = (DestVT == MVT::i64); in selectRem()
4474 (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; in selectRem()
4488 MVT VT; in selectMul()
4505 MVT SrcVT = VT; in selectMul()
4509 MVT VT; in selectMul()
4518 MVT VT; in selectMul()
4561 MVT RetVT; in selectShift()
4571 MVT SrcVT = RetVT; in selectShift()
4576 MVT TmpVT; in selectShift()
4585 MVT TmpVT; in selectShift()
4650 MVT RetVT, SrcVT; in selectBitCast()
4658 if (RetVT == MVT::f32 && SrcVT == MVT::i32) in selectBitCast()
4660 else if (RetVT == MVT::f64 && SrcVT == MVT::i64) in selectBitCast()
4662 else if (RetVT == MVT::i32 && SrcVT == MVT::f32) in selectBitCast()
4664 else if (RetVT == MVT::i64 && SrcVT == MVT::f64) in selectBitCast()
4672 case MVT::i32: RC = &AArch64::GPR32RegClass; break; in selectBitCast()
4673 case MVT::i64: RC = &AArch64::GPR64RegClass; break; in selectBitCast()
4674 case MVT::f32: RC = &AArch64::FPR32RegClass; break; in selectBitCast()
4675 case MVT::f64: RC = &AArch64::FPR64RegClass; break; in selectBitCast()
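File lines 4658-4675 pair each legal (RetVT, SrcVT) bitcast with a cross-register-bank move (FMOV-family on AArch64, an assumption from the target) and the matching destination class. At the value level a bitcast only reinterprets bits, as this illustration shows:

    #include <cstdint>
    #include <cstring>

    // i32 <-> f32 bitcast semantics: same bits, no numeric conversion.
    uint32_t bitsOfF32(float f) {
      uint32_t i;
      std::memcpy(&i, &f, sizeof i);
      return i;
    }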
4691 MVT RetVT; in selectFRem()
4699 case MVT::f32: in selectFRem()
4702 case MVT::f64: in selectFRem()
4728 MVT VT; in selectSDiv()
4736 if ((VT != MVT::i32 && VT != MVT::i64) || !C || in selectSDiv()
4765 if (VT == MVT::i64) { in selectSDiv()
4780 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectSDiv()
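File lines 4728-4780 show selectSDiv handling division by a power-of-two constant without a divide: bias a negative dividend by 2^k - 1 (a compare/select against zero), arithmetic-shift right by k, and negate when the divisor is negative. The round-toward-zero math, as a standalone sketch (assumes arithmetic >> on signed values, as on AArch64):

    #include <cstdint>

    int32_t sdivByPow2(int32_t n, unsigned k) {
      int32_t bias = (n < 0) ? (int32_t)((1u << k) - 1) : 0; // select on the sign
      return (n + bias) >> k;                                // ASR #k
    }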
4807 MVT PtrVT = TLI.getPointerTy(); in getRegForGEPIndex()
4831 MVT VT = TLI.getPointerTy(); in selectGetElementPtr()