/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrQPX.td
    41: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
    46: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
    51: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
    56: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRC))]>;
    61: [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    66: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
    71: [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    114: [(set v4f64:$FRT, (fadd v4f64:$FRA, v4f64:$FRB))]>;
    125: [(set v4f64:$FRT, (fsub v4f64:$FRA, v4f64:$FRB))]>;
    136: [(set v4f64:$FRT, (PPCfre v4f64:$FRB))]>;
    [all …]
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrQPX.td
    42: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
    47: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB, v4f64:$FRC))]>;
    52: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
    57: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRC))]>;
    62: [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    67: [(set v4f64:$FRT, (IntID v4f64:$FRA, v4f64:$FRB))]>;
    72: [(set v4f64:$FRT, (IntID v4f64:$FRB))]>;
    115: [(set v4f64:$FRT, (fadd v4f64:$FRA, v4f64:$FRB))]>;
    126: [(set v4f64:$FRT, (fsub v4f64:$FRA, v4f64:$FRB))]>;
    137: [(set v4f64:$FRT, (PPCfre v4f64:$FRB))]>;
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | vecreduce-propagate-sd-flags.ll
    5: ; This test checks that when v4f64 gets broken down to two v2f64 it maintains
    8: ; CHECK: Legalizing node: [[VFOUR:t.*]]: v4f64 = BUILD_VECTOR
    9: ; CHECK-NEXT: Analyzing result type: v4f64
    10: ; CHECK-NEXT: Split node result: [[VFOUR]]: v4f64 = BUILD_VECTOR
    27: %4 = call nnan reassoc double @llvm.vector.reduce.fmax.v4f64(<4 x double> %3)
    31: declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)
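For reference, a minimal sketch of the reduction pattern this test exercises, built only from the declaration quoted above (the wrapper name @reduce_fmax is hypothetical). The nnan/reassoc fast-math flags on the call are exactly what type legalization must carry over when it splits the v4f64 operand into two v2f64 halves:

    declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)

    ; Hypothetical wrapper; the flags must survive the v4f64 -> 2 x v2f64 split.
    define double @reduce_fmax(<4 x double> %v) {
      %r = call nnan reassoc double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v)
      ret double %r
    }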
|
/external/llvm/test/CodeGen/X86/ |
D | vector-intrinsics.ll
    3: declare <4 x double> @llvm.sin.v4f64(<4 x double> %p)
    4: declare <4 x double> @llvm.cos.v4f64(<4 x double> %p)
    5: declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
    6: declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32)
    10: %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
    15: %t = call <4 x double> @llvm.cos.v4f64(<4 x double> %p)
    20: %t = call <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
    25: %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q)
|
D | vec_floor.ll
    46: %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    49: declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    108: %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    111: declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    170: %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    173: declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    232: %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
    235: declare <4 x double> @llvm.rint.v4f64(<4 x double> %p)
    294: %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    297: declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | vec-strict-256.ll
    7: declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata…
    9: declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata…
    11: declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata…
    13: declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata…
    15: declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
    17: declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
    18: declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, meta…
    19: declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x doub…
    22: declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
    24: declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
    [all …]
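The constrained intrinsics above take trailing metadata operands for the rounding mode and exception behavior. A minimal sketch of one such call, assuming the standard "round.dynamic"/"fpexcept.strict" metadata strings and a hypothetical @strict_add wrapper:

    declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)

    ; Strict-FP add: no reassociation or speculation; rounding mode is read at
    ; run time and FP exceptions are treated as observable side effects.
    define <4 x double> @strict_add(<4 x double> %a, <4 x double> %b) strictfp {
      %r = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
               <4 x double> %a, <4 x double> %b,
               metadata !"round.dynamic", metadata !"fpexcept.strict")
      ret <4 x double> %r
    }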
|
D | pr34149.ll
    5: declare <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y)
    6: declare <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y)
    13: %z = call fast <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
    22: %z = call fast <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
|
D | vec-copysign-avx512.ll
    41: define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
    42: ; CHECK-LABEL: v4f64:
    46: %tmp = tail call <4 x double> @llvm.copysign.v4f64( <4 x double> %a, <4 x double> %b )
    63: declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)
|
D | vec-copysign.ll
    131: define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
    132: ; SSE2-LABEL: v4f64:
    144: ; AVX-LABEL: v4f64:
    151: %tmp = tail call <4 x double> @llvm.copysign.v4f64( <4 x double> %a, <4 x double> %b )
    158: declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)
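Both copysign tests reduce to the same call shape. A minimal sketch (the wrapper name @copysign4 is hypothetical; the declaration is taken from the tests above): each result lane takes its magnitude from the first operand and its sign bit from the second.

    declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)

    define <4 x double> @copysign4(<4 x double> %a, <4 x double> %b) {
      ; Lane-wise: |a[i]| with the sign of b[i].
      %r = tail call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
      ret <4 x double> %r
    }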
|
D | extractelement-fp.ll
    438: %v = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %x)
    475: %v = call <4 x double> @llvm.sin.v4f64(<4 x double> %x)
    520: %v = call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z)
    566: %v = call <4 x double> @llvm.fabs.v4f64(<4 x double> %x)
    618: %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y)
    670: %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y)
    682: ; %v = call <4 x double> @llvm.maximum.v4f64(<4 x double> %x, <4 x double> %y)
    694: ; %v = call <4 x double> @llvm.minimum.v4f64(<4 x double> %x, <4 x double> %y)
    842: %v = call <4 x double> @llvm.copysign.v4f64(<4 x double> %x, <4 x double> %y)
    886: %v = call <4 x double> @llvm.floor.v4f64(<4 x double> %x)
    [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-rounding-ops.ll
    20: %call = tail call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
    30: declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
    46: %call = tail call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x) nounwind readnone
    56: declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) nounwind readnone
    72: %call = tail call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
    82: declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
    98: %call = tail call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
    108: declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
|
D | vec_rounding.ll
    19: declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    22: %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    41: declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    44: %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    63: declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    66: %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    85: declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    88: %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | vec_rounding.ll
    19: declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    22: %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
    41: declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    44: %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
    63: declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    66: %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
    85: declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
    88: %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | fround.ll
    24: …timated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    35: …stimated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    46: …stimated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    57: …stimated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    68: %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> undef)
    82: …imated cost of 42 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    93: …timated cost of 4 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    104: …timated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    115: …timated cost of 1 for instruction: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    126: %V4F64 = call <4 x double> @llvm.floor.v4f64(<4 x double> undef)
    [all …]
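Each cost line above is a query against a plain IR call. A minimal sketch of the kind of function these estimates are computed for (the name @ceil4 is hypothetical); running it through opt's cost-model analysis at different X86 feature levels should reproduce the 42 / 4 / 1 spread seen above:

    declare <4 x double> @llvm.ceil.v4f64(<4 x double>)

    ; Costed as a scalarized libm sequence without SSE4.1 roundpd,
    ; and as a single vroundpd/vrndscalepd on AVX/AVX-512 targets.
    define <4 x double> @ceil4(<4 x double> %p) {
      %V4F64 = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
      ret <4 x double> %V4F64
    }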
|
/external/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/PowerPC/ |
D | PPCGenDAGISel.inc
    2211: /* 4878*/ OPC_CheckChild1Type, MVT::v4f64,
    2222: …// Src: (st qfrc:{ *:[v4f64] }:$FRT, xoaddr:{ *:[iPTR] }:$dst)<<P:Predicate_unindexedstore>><<P:Pr…
    2223: // Dst: (QVSTFDX qfrc:{ *:[v4f64] }:$FRT, xoaddr:{ *:[iPTR] }:$dst)
    2232: …// Src: (st qfrc:{ *:[v4f64] }:$FRT, xoaddr:{ *:[iPTR] }:$dst)<<P:Predicate_unindexedstore>><<P:Pr…
    2233: // Dst: (QVSTFSX qfrc:{ *:[v4f64] }:$FRT, xoaddr:{ *:[iPTR] }:$dst)
    2244: …// Src: (ist:{ *:[iPTR] } v4f64:{ *:[v4f64] }:$rS, iPTR:{ *:[iPTR] }:$ptrreg, iPTR:{ *:[iPTR] }:$p…
    2245: …// Dst: (QVSTFDUX:{ *:[iPTR] } ?:{ *:[v4f64] }:$rS, ?:{ *:[iPTR] }:$ptrreg, ?:{ *:[iPTR] }:$ptroff)
    2254: …// Src: (ist:{ *:[iPTR] } v4f64:{ *:[v4f64] }:$rS, iPTR:{ *:[iPTR] }:$ptrreg, iPTR:{ *:[iPTR] }:$p…
    2255: …// Dst: (QVSTFSUXs:{ *:[iPTR] } ?:{ *:[v4f64] }:$rS, ?:{ *:[iPTR] }:$ptrreg, ?:{ *:[iPTR] }:$ptrof…
    2807: …// Src: (intrinsic_void 5913:{ *:[iPTR] }, v4f64:{ *:[v4f64] }:$T, xoaddr:{ *:[iPTR] }:$dst) - Com…
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | fabs-copysign.ll
    6: declare <4 x double> @llvm.fabs.v4f64(<4 x double>)
    34: ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf <4 x double> @llvm.copysign.v4f64(<4 x double> <doub…
    37: %f = call <4 x double> @llvm.fabs.v4f64(<4 x double> %x)
    44: ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf <4 x double> @llvm.copysign.v4f64(<4 x double> <doub…
    47: %f = call <4 x double> @llvm.fabs.v4f64(<4 x double> %x)
|
/external/llvm-project/llvm/test/Transforms/LowerMatrixIntrinsics/ |
D | multiply-double-contraction-fmf.ll
    65: …%c = call contract <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x doub…
    69: declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, …
|
D | multiply-double-contraction.ll
    65: …%c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i…
    69: declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, …
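In both tests the trailing i32 immediates of llvm.matrix.multiply encode the shapes: a v4f64-by-v4f64-to-v4f64 multiply is a 2x2 * 2x2 = 2x2 product of flattened (column-major by default) matrices. A minimal sketch with a hypothetical @mul2x2 wrapper; the contract flag mirrors the -fmf variant above:

    declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, i32)

    define <4 x double> @mul2x2(<4 x double> %a, <4 x double> %b) {
      ; Dimensions: %a is 2x2, the shared inner dimension is 2, %b is 2x2.
      %c = call contract <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(
               <4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
      ret <4 x double> %c
    }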
|
/external/llvm-project/llvm/test/Instrumentation/MemorySanitizer/ |
D | masked-store-load.ll
    15: declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
    70: …%x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4…
    86: ; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 …
    104: ; ADDR: = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask…
    123: …%x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4…
    128: ; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 …
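The masked load MemorySanitizer instruments here has simple semantics: lanes whose mask bit is set are read from memory, disabled lanes take the passthru operand, and the i32 operand is the alignment. A minimal sketch in this tree's typed-pointer syntax, with a hypothetical @load_masked wrapper:

    declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)

    define <4 x double> @load_masked(<4 x double>* %p, <4 x i1> %mask) {
      ; Alignment 8; lanes with a false mask bit yield 0.0 from the passthru.
      %v = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(
               <4 x double>* %p, i32 8, <4 x i1> %mask, <4 x double> zeroinitializer)
      ret <4 x double> %v
    }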
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp
    443: {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd in getShuffleCost()
    538: { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, in getCastInstrCost()
    546: { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, in getCastInstrCost()
    589: { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, in getCastInstrCost()
    594: { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, in getCastInstrCost()
    601: { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, in getCastInstrCost()
    607: { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, in getCastInstrCost()
    674: { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, in getCastInstrCost()
    677: { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, in getCastInstrCost()
    680: { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, in getCastInstrCost()
    [all …]
|
D | X86InstrFMA.td
    107: loadv4f64, X86Fmadd, v2f64, v4f64>, VEX_W;
    109: loadv4f64, X86Fmsub, v2f64, v4f64>, VEX_W;
    112: v2f64, v4f64>, VEX_W;
    115: v2f64, v4f64>, VEX_W;
    127: loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
    130: v4f64>, VEX_W;
    428: defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
    430: defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
    432: defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
    434: defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
    [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp
    695: { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    697: { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    699: { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    707: { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    744: { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ in getArithmeticInstrCost()
    1044: {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd in getShuffleCost()
    1061: {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd in getShuffleCost()
    1076: {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd in getShuffleCost()
    1083: {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd in getShuffleCost()
    1093: {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd in getShuffleCost()
    [all …]
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h
    117: v4f64 = 62, // 4 x f64 enumerator
    257: return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 || in is256BitVector()
    366: case v4f64: in getVectorElementType()
    410: case v4f64: return 4; in getVectorNumElements()
    495: case v4f64: return 256; in getSizeInBits()
    658: if (NumElements == 4) return MVT::v4f64; in getVectorVT()
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.rint.f64.ll
    38: %0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
    46: declare <4 x double> @llvm.rint.v4f64(<4 x double>) #0
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | llvm.rint.f64.ll
    38: %0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
    46: declare <4 x double> @llvm.rint.v4f64(<4 x double>) #0
|