/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
      30  // Handle all vector types as either f64 or v2f64.
      32  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
      34  // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
      35  CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
      42  CCIfType<[v2f64], CCAssignToStack<16, 4>>
      48  // Handle all vector types as either f64 or v2f64.
      50  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
      52  CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
      62  // Handle all vector types as either f64 or v2f64.
      64  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
      [all …]
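Note: the APCS rules above bitconvert every 128-bit vector type to v2f64 and hand f64/v2f64 to a custom handler that places each double in an adjacent GPR pair, spilling to the stack when registers run out. A hedged illustration of the effect (my own sketch, not taken from the files above; the register names are an assumption for illustration):

    // Under the soft-float APCS/base-AAPCS rules encoded by
    // CC_ARM_APCS_Custom_f64, each double travels in a pair of GPRs;
    // a v2f64 argument is simply two such doubles (or stack slots).
    double first_plus_second(double a, double b) {
        // a typically lands in r0:r1 and b in r2:r3; further doubles
        // spill to the stack.
        return a + b;
    }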
|
D | ARMCallingConv.h |
      65  if (LocVT == MVT::v2f64 &&  in CC_ARM_APCS_Custom_f64()
     113  if (LocVT == MVT::v2f64 &&  in CC_ARM_AAPCS_Custom_f64()
     145  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))  in RetCC_ARM_APCS_Custom_f64()
|
D | ARMISelLowering.cpp |
     165  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);  in addQRTypeForNEON()
     442  addQRTypeForNEON(MVT::v2f64);  in ARMTargetLowering()
     450  setOperationAction(ISD::FADD, MVT::v2f64, Expand);  in ARMTargetLowering()
     451  setOperationAction(ISD::FSUB, MVT::v2f64, Expand);  in ARMTargetLowering()
     452  setOperationAction(ISD::FMUL, MVT::v2f64, Expand);  in ARMTargetLowering()
     453  setOperationAction(ISD::FDIV, MVT::v2f64, Expand);  in ARMTargetLowering()
     454  setOperationAction(ISD::FREM, MVT::v2f64, Expand);  in ARMTargetLowering()
     455  setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);  in ARMTargetLowering()
     456  setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);  in ARMTargetLowering()
     457  setOperationAction(ISD::FNEG, MVT::v2f64, Expand);  in ARMTargetLowering()
     [all …]
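Note: the entries above register v2f64 as a NEON type but mark its floating-point arithmetic Expand, because 32-bit NEON has no double-precision vector ALU; such operations are typically unrolled into scalar f64 operations. A minimal sketch of code that exercises this path (clang/gcc vector extension; my example, not part of the tree above):

    typedef double v2f64 __attribute__((vector_size(16)));

    // With FADD on MVT::v2f64 marked Expand (ARM above), this add is
    // legalized into two scalar f64 adds; where it is Legal (X86 SSE2,
    // later in this listing), it becomes one 128-bit instruction.
    v2f64 add2(v2f64 a, v2f64 b) {
        return a + b;
    }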
|
D | ARMRegisterInfo.td |
     280  def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
     289  def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     296  def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
D | ARMInstrNEON.td |
     154  [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;
     161  [(store (v2f64 QPR:$src), GPR:$Rn)]>;
    4352  def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
    4411  def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
    4416  def : Pat<(v2f64 (scalar_to_vector (f64 DPR:$src))),
    4417  (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, dsub_0)>;
    4911  def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
    4916  def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
    4921  def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
    4926  def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
    [all …]
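Note: the Pat<> entries at 4911-4926 make bitconverts between v2f64 and the other 128-bit QPR types free — the bits stay in the same Q register. In source terms the reinterpretation looks roughly like this (my sketch, using the generic vector extension rather than NEON intrinsics):

    typedef double v2f64 __attribute__((vector_size(16)));
    typedef int    v4i32 __attribute__((vector_size(16)));

    // Bit-for-bit reinterpretation of a 128-bit vector; with the
    // bitconvert patterns above it costs no instructions.
    v4i32 as_v4i32(v2f64 v) {
        v4i32 r;
        __builtin_memcpy(&r, &v, sizeof r);
        return r;
    }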
|
/external/llvm/test/CodeGen/CellSPU/useful-harnesses/ |
D | vecoperations.c |
       8  typedef double v2f64 __attribute__((ext_vector_type(2)));  typedef
      62  void print_v2f64(const char *str, v2f64 v) {  in print_v2f64()
     131  v2f64 v2f64_shuffle(v2f64 a) {  in v2f64_shuffle()
     132  v2f64 c2 = a.yx;  in v2f64_shuffle()
     148  v2f64 v4 = { 5.8e56, 9.103e-62 };  in main()
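Note: this harness defines v2f64 with clang's ext_vector_type and tests a lane swap via the .yx swizzle. A self-contained reconstruction of that check (approximate, since most of the file is elided above):

    #include <stdio.h>

    typedef double v2f64 __attribute__((ext_vector_type(2)));

    static v2f64 v2f64_shuffle(v2f64 a) {
        return a.yx;                       // swap the two lanes
    }

    int main(void) {
        v2f64 v = { 5.8e56, 9.103e-62 };   // values from line 148 above
        v2f64 s = v2f64_shuffle(v);
        printf("{ %g, %g } -> { %g, %g }\n", v.x, v.y, s.x, s.y);
        return 0;
    }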
|
/external/llvm/lib/Target/CellSPU/ |
D | CellSDKIntrinsics.td |
     412  [(set (v2f64 VECREG:$rT), (int_spu_si_dfa (v2f64 VECREG:$rA),
     413  (v2f64 VECREG:$rB)))]>;
     418  [(set (v2f64 VECREG:$rT), (int_spu_si_dfs (v2f64 VECREG:$rA),
     419  (v2f64 VECREG:$rB)))]>;
     424  [(set (v2f64 VECREG:$rT), (int_spu_si_dfm (v2f64 VECREG:$rA),
     425  (v2f64 VECREG:$rB)))]>;
     430  [(set (v2f64 VECREG:$rT), (int_spu_si_dfma (v2f64 VECREG:$rA),
     431  (v2f64 VECREG:$rB)))]>;
     436  [(set (v2f64 VECREG:$rT), (int_spu_si_dfnma (v2f64 VECREG:$rA),
     437  (v2f64 VECREG:$rB)))]>;
     [all …]
|
D | SPUCallingConv.td |
      24  CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
      41  v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
      55  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
D | SPUInstrInfo.td |
      65  def v2f64: LoadDFormVec<v2f64>;
      97  def v2f64: LoadAFormVec<v2f64>;
     129  def v2f64: LoadXFormVec<v2f64>;
     177  def v2f64: StoreDFormVec<v2f64>;
     207  def v2f64: StoreAFormVec<v2f64>;
     239  def v2f64: StoreXFormVec<v2f64>;
     307  [(set (v2f64 VECREG:$rT), (SPUshufmask dform2_addr:$src))]>;
     311  [(set (v2f64 VECREG:$rT), (SPUshufmask xform_addr:$src))]>;
     353  // TODO: Need v2f64, v4f32
    1403  def v2f64: ORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    [all …]
|
D | SPUISelDAGToDAG.cpp |
     603  case MVT::v2f64:  in getRC()
     796  && (OpVT == MVT::f64 || OpVT == MVT::v2f64)) {  in Select()
     805  if (OpVT == MVT::v2f64)  in Select()
     821  } else if (OpVT == MVT::v2f64) {  in Select()
     835  } else if (OpVT == MVT::v2f64) {  in Select()
|
D | SPURegisterInfo.td | 182 def VECREG : RegisterClass<"SPU", [v16i8,v8i16,v4i32,v4f32,v2i64,v2f64], 128,
|
D | SPUISelLowering.cpp |
     398  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);  in SPUTargetLowering()
    1095  DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));  in LowerConstantFP()
    1162  case MVT::v2f64:  in LowerFormalArguments()
    1314  case MVT::v2f64:  in LowerCall()
    1675  case MVT::v2f64: {  in LowerBUILD_VECTOR()
    1681  return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,  in LowerBUILD_VECTOR()
    1961  case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;  in LowerSCALAR_TO_VECTOR()
|
/external/llvm/include/llvm/CodeGen/ |
D | ValueTypes.h |
      74  v2f64 = 31, // 2 x f64  enumerator
     209  case v2f64:  in getVectorElementType()
     237  case v2f64: return 2;  in getVectorNumElements()
     277  case v2f64: return 128;  in getSizeInBits()
     368  if (NumElements == 2) return MVT::v2f64;  in getVectorVT()
     482  V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64);  in is128BitVector()
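Note: the enumerator above (v2f64 = 31) denotes a 128-bit machine value type holding two f64 elements, and the accessors listed return exactly those properties. A minimal query sketch, assuming an LLVM tree of this vintage is on the include path:

    #include "llvm/CodeGen/ValueTypes.h"
    #include <cassert>

    int main() {
        llvm::EVT VT = llvm::MVT::v2f64;
        assert(VT.getVectorNumElements() == 2);                // 2 x f64
        assert(VT.getVectorElementType() == llvm::MVT::f64);
        assert(VT.getSizeInBits() == 128);
        assert(VT.is128BitVector());
        return 0;
    }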
|
D | ValueTypes.td | 55 def v2f64 : ValueType<128, 31>; // 2 x f64 vector value
|
/external/llvm/lib/Target/X86/ |
D | X86InstrSSE.td |
     140  def VMOVSDrr : sse12_move_rr<FR64, v2f64,
     153  def MOVSDrr : sse12_move_rr<FR64, v2f64,
     170  def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
     171  (MOVSDrr (v2f64 VR128:$src1),
     172  (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
     179  def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
     180  (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
     199  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
     201  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
     203  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
     [all …]
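Note: the patterns around 199-203 cover a scalar f64 load that is zero-extended into a v2f64 (X86vzmovl of scalar_to_vector), typically selected as a single movsd load. In intrinsic form this corresponds to something like the following (my example, not from the file above):

    #include <emmintrin.h>

    // Loads one double into the low lane and zeroes the high lane;
    // usually a single movsd, per the patterns above.
    __m128d load_low(const double *p) {
        return _mm_load_sd(p);
    }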
|
D | X86CallingConv.td |
      40  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     157  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     175  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
     194  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
     213  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     235  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     273  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
     282  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
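Note: these rules pass 128-bit vectors, v2f64 included, in XMM registers where available, fall back to 16-byte-aligned stack slots (CCAssignToStack<16, 16>), and in one convention pass them indirectly through a pointer (CCPassIndirect<i64>). Which line belongs to which ABI is not visible in this excerpt, so the following is only an illustrative sketch (my example):

    #include <emmintrin.h>

    // On the SysV x86-64 C convention a __m128d (v2f64) argument arrives
    // in an XMM register; a convention using CCPassIndirect<i64> instead
    // receives a pointer to a 16-byte-aligned temporary.
    __m128d scale(__m128d v, __m128d s) {
        return _mm_mul_pd(v, s);
    }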
|
D | X86InstrFragmentsSIMD.td |
     163  def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
     173  def sdmem : Operand<v2f64> {
     186  def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
     216  (v2f64 (alignedload node:$ptr))>;
     248  def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
     298  def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
|
D | X86ISelLowering.cpp |
     823  addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);  in X86TargetLowering()
     842  setOperationAction(ISD::FADD, MVT::v2f64, Legal);  in X86TargetLowering()
     843  setOperationAction(ISD::FSUB, MVT::v2f64, Legal);  in X86TargetLowering()
     844  setOperationAction(ISD::FMUL, MVT::v2f64, Legal);  in X86TargetLowering()
     845  setOperationAction(ISD::FDIV, MVT::v2f64, Legal);  in X86TargetLowering()
     846  setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);  in X86TargetLowering()
     847  setOperationAction(ISD::FNEG, MVT::v2f64, Custom);  in X86TargetLowering()
     849  setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);  in X86TargetLowering()
     860  setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);  in X86TargetLowering()
     883  setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);  in X86TargetLowering()
     [all …]
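Note: in contrast with the ARM entries earlier, SSE2 puts v2f64 in the VR128 class and marks its basic arithmetic Legal, so operations such as FSQRT map one-to-one onto 128-bit instructions. A small sketch (my example):

    #include <emmintrin.h>

    // FSQRT on MVT::v2f64 is Legal with SSE2, so this is a single sqrtpd.
    __m128d sqrt2(__m128d v) {
        return _mm_sqrt_pd(v);
    }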
|
D | X86RegisterInfo.td | 453 def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
/external/llvm/test/TableGen/ |
D | cast.td |
      10  def v2f64 : ValueType<128, 28>; // 2 x f64 vector value
      58  def VR128 : RegisterClass<[v2i64, v2f64],
|
D | TargetInstrSpec.td |
      11  def v2f64 : ValueType<128, 28>; // 2 x f64 vector value
      59  def VR128 : RegisterClass<[v2i64, v2f64],
|
/external/llvm/lib/VMCore/ |
D | ValueTypes.cpp |
     133  case MVT::v2f64: return "v2f64";  in getEVTString()
     180  case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2);  in getTypeForEVT()
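Note: the two functions above name the type and map it back to IR: getEVTString() yields "v2f64" and getTypeForEVT() builds <2 x double>. A hedged usage sketch (header paths and getGlobalContext() as they were in this LLVM vintage):

    #include "llvm/LLVMContext.h"
    #include "llvm/Type.h"
    #include "llvm/CodeGen/ValueTypes.h"
    #include <iostream>

    int main() {
        llvm::EVT VT = llvm::MVT::v2f64;
        std::cout << VT.getEVTString() << "\n";                  // "v2f64"
        auto *Ty = VT.getTypeForEVT(llvm::getGlobalContext());   // <2 x double>
        std::cout << Ty->isVectorTy() << "\n";                   // 1
        return 0;
    }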
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCCallingConv.td | 59 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
/external/llvm/utils/TableGen/ |
D | CodeGenTarget.cpp | 88 case MVT::v2f64: return "MVT::v2f64"; in getEnumName()
|
/external/llvm/include/llvm/ |
D | Intrinsics.td | 137 def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double
|