Searched refs:VMOVDRR (Results 1 – 12 of 12) sorted by relevance

/external/swiftshader/third_party/LLVM/lib/Target/ARM/
ARMISelLowering.h:80    VMOVDRR, // Two gprs to double. enumerator
ARMISelLowering.cpp:850 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; in getTargetNodeName()
1132 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerCallResult()
1147 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerCallResult()
2394 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); in GetF64FormalArgument()
3092 Tmp0.getOpcode() == ARMISD::VMOVDRR; in LowerFCOPYSIGN()
3163 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerFCOPYSIGN()
3228 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); in ExpandBITCAST()
6912 if (InDouble.getOpcode() == ARMISD::VMOVDRR) in PerformVMOVRRDCombine()
6977 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && in PerformSTORECombine()
7937 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); in PerformDAGCombine()
ARMInstrVFP.td:27       def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
555 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
ARMFastISel.cpp:1691    TII.get(ARM::VMOVDRR), ResultReg) in FinishCall()
/external/llvm/lib/Target/ARM/
ARMInstrVFP.td:21       def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
1008 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
1085 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
1115 (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1118 (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1121 (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
1124 (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
2292 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
ARMISelLowering.h:77    VMOVDRR, // Two gprs to double. enumerator
ARM.td:125              // Some targets (e.g. Cortex-A9) prefer VMOVSR to VMOVDRR even when using NEON
ARMISelLowering.cpp:1152 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; in getTargetNodeName()
1486 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerCallResult()
1503 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerCallResult()
3108 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); in GetF64FormalArgument()
3716 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); in getCMOV()
4250 Tmp0.getOpcode() == ARMISD::VMOVDRR; in LowerFCOPYSIGN()
4321 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); in LowerFCOPYSIGN()
4479 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); in ExpandBITCAST()
9618 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) in PerformVMOVRRDCombine()
10328 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && in PerformSTORECombine()
[all …]
ARMScheduleSwift.td:629 def : InstRW<[SwiftWriteP2FourCycle], (instregex "VMOVDRR$")>;
ARMFastISel.cpp:2039    TII.get(ARM::VMOVDRR), ResultReg) in FinishCall()
ARMInstrNEON.td:5998    // NEONvdup patterns for uarchs with slow VDUP.32 - use VMOVDRR instead.
5999 def : Pat<(v2i32 (NEONvdup (i32 GPR:$R))), (VMOVDRR GPR:$R, GPR:$R)>,
6001 def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VMOVDRR GPR:$R, GPR:$R)>,
6702 // Prefer VMOVDRR for i32 -> f32 bitcasts, it can write all DPR registers.
6704 (EXTRACT_SUBREG (VMOVDRR GPR:$a, GPR:$a), ssub_0)>,
ARMBaseInstrInfo.cpp:4602 case ARM::VMOVDRR: in getRegSequenceLikeInputs()
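
The hits in ARMISelLowering.cpp all follow the same pattern: an f64 value is rebuilt from two i32 halves by creating an ARMISD::VMOVDRR node ("Two gprs to double"). A minimal sketch of that pattern is shown below, assuming the usual ARM backend context (a SelectionDAG, a debug location, and two i32 SDValues already in hand); the helper name buildF64FromHalves is hypothetical and only mirrors the calls listed above, it is not a function in the tree.

```cpp
// Sketch only: mirrors the DAG.getNode(ARMISD::VMOVDRR, ...) calls seen in
// LowerCallResult() / LowerFCOPYSIGN() above. Requires the in-tree backend
// header ARMISelLowering.h for the ARMISD::VMOVDRR opcode.
#include "ARMISelLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"

// Hypothetical helper: glue the low/high i32 halves into one f64 value.
// VMOVDRR moves two GPRs into a double-precision VFP register.
static llvm::SDValue buildF64FromHalves(llvm::SelectionDAG &DAG,
                                        const llvm::SDLoc &dl,
                                        llvm::SDValue Lo, llvm::SDValue Hi) {
  return DAG.getNode(llvm::ARMISD::VMOVDRR, dl, llvm::MVT::f64, Lo, Hi);
}
```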