/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrQPX.td |
     42  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
     47  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
     52  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
     57  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRC))]>;
     62  [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
     67  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
     72  [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    115  [(set v4f64:$FRT, (fadd v4f64:$FRA, v4f64:$FRB))]>;
    126  [(set v4f64:$FRT, (fsub v4f64:$FRA, v4f64:$FRB))]>;
    137  [(set v4f64:$FRT, (PPCfre v4f64:$FRB))]>;
    [all …]
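The QPX patterns above bind both PPC intrinsics (via the IntID multiclass parameter) and generic DAG nodes such as fadd/fsub to v4f64 operands. A minimal IR sketch of what the fadd pattern at line 115 matches (wrapper name hypothetical):

    define <4 x double> @qpx_fadd(<4 x double> %a, <4 x double> %b) {
      ; a plain v4f64 fadd; on a QPX target this selects the pattern quoted above
      %r = fadd <4 x double> %a, %b
      ret <4 x double> %r
    }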
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrQPX.td |
     42  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
     47  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
     52  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
     57  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRC))]>;
     62  [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
     67  [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
     72  [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    115  [(set v4f64:$FRT, (fadd v4f64:$FRA, v4f64:$FRB))]>;
    126  [(set v4f64:$FRT, (fsub v4f64:$FRA, v4f64:$FRB))]>;
    137  [(set v4f64:$FRT, (PPCfre v4f64:$FRB))]>;
    [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | vector-intrinsics.ll |
     3  declare <4 x double> @llvm.sin.v4f64(<4 x double> %p)
     4  declare <4 x double> @llvm.cos.v4f64(<4 x double> %p)
     5  declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
     6  declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32)
    10  %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
    15  %t = call <4 x double> @llvm.cos.v4f64(<4 x double> %p)
    20  %t = call <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
    25  %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q)
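For reference, a self-contained reduction of the sin case from this test — declaration plus call, with the wrapper function name being hypothetical:

    declare <4 x double> @llvm.sin.v4f64(<4 x double>)

    define <4 x double> @sin_v4f64(<4 x double> %p) {
      ; element-wise sine over a 4 x double vector
      %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
      ret <4 x double> %t
    }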
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vector-intrinsics.ll |
     3  declare <4 x double> @llvm.sin.v4f64(<4 x double> %p)
     4  declare <4 x double> @llvm.cos.v4f64(<4 x double> %p)
     5  declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
     6  declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32)
    10  %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
    15  %t = call <4 x double> @llvm.cos.v4f64(<4 x double> %p)
    20  %t = call <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
    25  %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q)
|
D | pr34149.ll |
     5  declare <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y)
     6  declare <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y)
    13  %z = call fast <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
    22  %z = call fast <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
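A standalone reduction of the minnum case (wrapper name hypothetical); the maxnum case is symmetric:

    declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)

    define <4 x double> @min_v4f64(<4 x double> %x, <4 x double> %y) {
      ; fast-math lane-wise minimum, as exercised by pr34149.ll
      %z = call fast <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y)
      ret <4 x double> %z
    }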
|
D | vec-copysign-avx512.ll |
     70  define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
     71  ; AVX512VL-LABEL: v4f64:
     78  ; AVX512VLDQ-LABEL: v4f64:
     84  %tmp = tail call <4 x double> @llvm.copysign.v4f64( <4 x double> %a, <4 x double> %b )
    110  declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)
|
D | vec-copysign.ll |
    141  define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
    142  ; SSE2-LABEL: v4f64:
    154  ; AVX-LABEL: v4f64:
    161  %tmp = tail call <4 x double> @llvm.copysign.v4f64( <4 x double> %a, <4 x double> %b )
    168  declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)
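Both copysign tests reduce to the same IR shape; a minimal sketch (wrapper name hypothetical):

    declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>)

    define <4 x double> @copysign_v4f64(<4 x double> %a, <4 x double> %b) nounwind {
      ; lane-wise: magnitude taken from %a, sign bit taken from %b
      %t = tail call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
      ret <4 x double> %t
    }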
|
D | fma-phi-213-to-231.ll |
    28  %add = call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %acc.04)
    37  declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
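The fma hits boil down to this shape (wrapper name hypothetical; the test itself accumulates through a loop phi):

    declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)

    define <4 x double> @fma_v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %acc) {
      ; fused multiply-add: %x * %y + %acc with a single rounding
      %r = call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %acc)
      ret <4 x double> %r
    }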
|
/external/llvm/test/CodeGen/X86/ |
D | vector-intrinsics.ll |
     3  declare <4 x double> @llvm.sin.v4f64(<4 x double> %p)
     4  declare <4 x double> @llvm.cos.v4f64(<4 x double> %p)
     5  declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
     6  declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32)
    10  %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
    15  %t = call <4 x double> @llvm.cos.v4f64(<4 x double> %p)
    20  %t = call <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
    25  %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q)
|
D | vec_floor.ll |
     46  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
     49  declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    108  %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    111  declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    170  %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    173  declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    232  %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
    235  declare <4 x double> @llvm.rint.v4f64(<4 x double> %p)
    294  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    297  declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
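Each rounding intrinsic in vec_floor.ll follows the same declare/call pair; the floor case in full (wrapper name hypothetical):

    declare <4 x double> @llvm.floor.v4f64(<4 x double>)

    define <4 x double> @floor_v4f64(<4 x double> %p) {
      ; rounds each lane toward negative infinity
      %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
      ret <4 x double> %t
    }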
|
D | fma-phi-213-to-231.ll |
    28  %add = call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %acc.04)
    37  declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | vecreduce-propagate-sd-flags.ll |
     5  ; This test check that when v4f64 gets broken down to two v2f64 it maintains
     8  ; CHECK: Legalizing node: [[VFOUR:t.*]]: v4f64 = BUILD_VECTOR
     9  ; CHECK-NEXT: Analyzing result type: v4f64
    10  ; CHECK-NEXT: Split node result: [[VFOUR]]: v4f64 = BUILD_VECTOR
    27  %4 = call nnan reassoc double @llvm.experimental.vector.reduce.fmax.f64.v4f64(<4 x double> %3)
    31  declare double @llvm.experimental.vector.reduce.fmax.f64.v4f64(<4 x double>)
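A standalone sketch of the reduction being legalized, using the LLVM 7-era intrinsic name quoted above (wrapper name hypothetical):

    declare double @llvm.experimental.vector.reduce.fmax.f64.v4f64(<4 x double>)

    define double @reduce_fmax(<4 x double> %v) {
      ; horizontal fmax over the four lanes; nnan/reassoc match the flags in the test
      %m = call nnan reassoc double @llvm.experimental.vector.reduce.fmax.f64.v4f64(<4 x double> %v)
      ret double %m
    }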
|
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-rounding-ops.ll |
     20  %call = tail call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
     30  declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
     46  %call = tail call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x) nounwind readnone
     56  declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) nounwind readnone
     72  %call = tail call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
     82  declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
     98  %call = tail call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
    108  declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
|
D | vec_rounding.ll |
    19  declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    22  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    41  declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    44  %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    63  declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    66  %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    85  declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    88  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | qpx-rounding-ops.ll |
     20  %call = tail call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
     30  declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
     46  %call = tail call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x) nounwind readnone
     56  declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) nounwind readnone
     72  %call = tail call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
     82  declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
     98  %call = tail call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
    108  declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
|
D | vec_rounding.ll |
    19  declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    22  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    41  declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    44  %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    63  declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    66  %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    85  declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    88  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrVecCompiler.td |
    58  def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
    62  def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
    67  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
    72  def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
    78  def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
    79  def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
    80  def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
    81  def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
    82  def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
    83  def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
    [all …]
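These Pat defs are what let 256-bit bitcasts remain register reinterpretations rather than real instructions; a minimal IR illustration (function name hypothetical):

    define <4 x i64> @cast_v4f64(<4 x double> %v) {
      ; reinterprets the same 256-bit register; the patterns above select this as a no-op
      %i = bitcast <4 x double> %v to <4 x i64>
      ret <4 x i64> %i
    }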
|
D | X86TargetTransformInfo.cpp |
    633  { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    635  { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    637  { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    645  { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    682  { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ in getArithmeticInstrCost()
    911  { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd in getShuffleCost()
    928  { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd in getShuffleCost()
    943  { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd in getShuffleCost()
    950  { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd in getShuffleCost()
    960  { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd in getShuffleCost()
    [all …]
|
/external/swiftshader/third_party/LLVM/include/llvm/CodeGen/ |
D | ValueTypes.h |
     75  v4f64 = 32, // 4 x f64 enumerator
     78  LAST_VECTOR_VALUETYPE = v4f64,
    147  (SimpleTy >= MVT::v2f32 && SimpleTy <= MVT::v4f64)); in isFloatingPoint()
    210  case v4f64: return f64; in getVectorElementType()
    231  case v4f64: return 4; in getVectorNumElements()
    283  case v4f64: return 256; in getSizeInBits()
    369  if (NumElements == 4) return MVT::v4f64; in getVectorVT()
    504  return (V == MVT::v8f32 || V == MVT::v4f64 || V == MVT::v32i8 || in is256BitVector()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | fround.ll |
     24  …timated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     35  …stimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     46  …stimated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     57  …stimated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     68  …stimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     79  …stimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
     90  …stimated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    101  %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    115  …imated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    126  …timated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/MemorySanitizer/ |
D | masked-store-load.ll |
      9  declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
     64  …%x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4…
     80  ; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 …
     98  ; ADDR: = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask…
    117  …%x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4…
    122  ; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 …
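A self-contained form of the masked load being instrumented, using the typed-pointer signature from the declare at line 9 (wrapper and parameter names hypothetical):

    declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)

    define <4 x double> @masked_load(<4 x double>* %p, <4 x i1> %mask, <4 x double> %passthru) {
      ; loads only lanes whose mask bit is set (alignment 1); other lanes come from %passthru
      %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %passthru)
      ret <4 x double> %x
    }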
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    443  {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd in getShuffleCost()
    538  { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, in getCastInstrCost()
    546  { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, in getCastInstrCost()
    589  { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, in getCastInstrCost()
    594  { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, in getCastInstrCost()
    601  { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, in getCastInstrCost()
    607  { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, in getCastInstrCost()
    674  { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, in getCastInstrCost()
    677  { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, in getCastInstrCost()
    680  { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, in getCastInstrCost()
    [all …]
|
D | X86InstrFMA.td |
    107  loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
    109  loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
    112  v2f64, v4f64>, VEX_W;
    115  v2f64, v4f64>, VEX_W;
    127  loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
    130  v4f64>, VEX_W;
    428  defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
    430  defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
    432  defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
    434  defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
    [all …]
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
    117  v4f64 = 62, // 4 x f64 enumerator
    257  return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 || in is256BitVector()
    366  case v4f64: in getVectorElementType()
    410  case v4f64: return 4; in getVectorNumElements()
    495  case v4f64: return 256; in getSizeInBits()
    658  if (NumElements == 4) return MVT::v4f64; in getVectorVT()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | llvm.rint.f64.ll |
    38  %0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
    46  declare <4 x double> @llvm.rint.v4f64(<4 x double>) #0
|