/external/swiftshader/third_party/llvm-7.0/llvm/test/Verifier/ |
D | scatter_gather.ll |
    14 …%res = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <16 x i1> %ma…
    17 declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <16 x i1>, <8 x float>)
    30 …%res = call <8 x float> @llvm.masked.gather.v8f32.p0v8p0f32(<8 x float*>* %ptrs, i32 4, <8 x i1> %…
    33 declare <8 x float> @llvm.masked.gather.v8f32.p0v8p0f32(<8 x float*>*, i32, <8 x i1>, <8 x float>)
    38 …%res = call <8 x float> @llvm.masked.gather.v8f32.v8f32(<8 x float> %ptrs, i32 4, <8 x i1> %mask, …
    41 declare <8 x float> @llvm.masked.gather.v8f32.v8f32(<8 x float>, i32, <8 x i1>, <8 x float>)
    46 …%res = call <8 x float> @llvm.masked.gather.v8f32.v8p0f64(<8 x double*> %ptrs, i32 4, <8 x i1> %ma…
    49 declare <8 x float> @llvm.masked.gather.v8f32.v8p0f64(<8 x double*>, i32, <8 x i1>, <8 x float>)
    54 …%res = call <8 x float> @llvm.masked.gather.v8f32.v16p0f32(<16 x float*> %ptrs, i32 4, <8 x i1> %m…
    57 declare <8 x float> @llvm.masked.gather.v8f32.v16p0f32(<16 x float*>, i32, <8 x i1>, <8 x float>)
    [all …]
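These Verifier tests feed deliberately malformed masked-gather calls to the IR verifier (mask width not matching the result, a non-pointer vector of addresses, and so on). For contrast, a well-formed call keeps the pointer vector, mask, and passthru at the same element count as the result; a minimal sketch assuming LLVM 7's typed-pointer syntax, with an invented @gather_ok wrapper name:

    define <8 x float> @gather_ok(<8 x float*> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
      ; 8 pointers, alignment 4, an 8-lane mask, and an 8-lane passthru all agree with the result type
      %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %passthru)
      ret <8 x float> %res
    }
    declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)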
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_rounding.ll |
    107 declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    110 %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    126 declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    129 %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    145 declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    148 %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    164 declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
    167 %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
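vec_rounding.ll exercises the element-wise rounding intrinsics (floor, ceil, trunc, nearbyint) at the <8 x float> width. A minimal self-contained sketch of one of them, using an invented wrapper name:

    define <8 x float> @floor_v8f32(<8 x float> %p) {
      ; rounds each of the eight lanes toward negative infinity
      %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
      ret <8 x float> %t
    }
    declare <8 x float> @llvm.floor.v8f32(<8 x float>)

The ceil, trunc, and nearbyint variants follow the same shape with only the intrinsic name changed.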
|
D | vec_sqrt.ll |
    11 declare <8 x float> @llvm.sqrt.v8f32(<8 x float> %val)
    40 %sqrt = call <8 x float> @llvm.sqrt.v8f32 (<8 x float> %x)
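The square-root intrinsic has the same single-operand shape; a sketch with an invented wrapper name:

    define <8 x float> @sqrt_v8f32(<8 x float> %x) {
      ; lane-wise square root of the eight f32 elements
      %sqrt = call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
      ret <8 x float> %sqrt
    }
    declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)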
|
D | vec_fmuladd.ll |
    8 declare <8 x float> @llvm.fmuladd.v8f32(<8 x float> %val, <8 x float>, <8 x float>)
    31 %fmuladd = call <8 x float> @llvm.fmuladd.v8f32 (<8 x float> %x, <8 x float> %x, <8 x float> %x)
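llvm.fmuladd.v8f32 computes a*b + c per lane and leaves the backend free to emit either a fused multiply-add or a separate multiply and add. A sketch with an invented wrapper name:

    define <8 x float> @fmuladd_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
      ; a*b + c in every lane; may be lowered to a single FMA when profitable
      %r = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
      ret <8 x float> %r
    }
    declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)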
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | vec_rounding.ll |
    107 declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    110 %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    126 declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    129 %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    145 declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    148 %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    164 declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
    167 %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
|
D | vec_fmuladd.ll |
    8 declare <8 x float> @llvm.fmuladd.v8f32(<8 x float> %val, <8 x float>, <8 x float>)
    31 %fmuladd = call <8 x float> @llvm.fmuladd.v8f32 (<8 x float> %x, <8 x float> %x, <8 x float> %x)
|
D | vec_sqrt.ll |
    11 declare <8 x float> @llvm.sqrt.v8f32(<8 x float> %val)
    40 %sqrt = call <8 x float> @llvm.sqrt.v8f32 (<8 x float> %x)
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrVecCompiler.td |
    57 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
    63 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
    68 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
    73 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
    74 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
    75 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
    76 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
    77 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
    78 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
    83 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
    [all …]
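These patterns make 256-bit reinterpretations free: a v8f32 value viewed as v4i64, v8i32, v16i16, v32i8, or v4f64 stays in the same YMM register, so the bitconvert selects to nothing. The IR-level operation they match is a plain bitcast; a sketch with an invented function name:

    define <4 x i64> @reinterpret_v8f32(<8 x float> %v) {
      ; same 256 bits, different lane interpretation; x86 needs no instruction for this
      %r = bitcast <8 x float> %v to <4 x i64>
      ret <4 x i64> %r
    }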
|
D | X86TargetTransformInfo.cpp |
    634 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    636 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    638 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    642 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ in getArithmeticInstrCost()
    679 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ in getArithmeticInstrCost()
    914 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps in getShuffleCost()
    929 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps in getShuffleCost()
    944 { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps in getShuffleCost()
    951 { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps in getShuffleCost()
    961 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps in getShuffleCost()
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vec-copysign-avx512.ll |
    23 define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
    24 ; AVX512VL-LABEL: v8f32:
    31 ; AVX512VLDQ-LABEL: v8f32:
    37 %tmp = tail call <8 x float> @llvm.copysign.v8f32( <8 x float> %a, <8 x float> %b )
    107 declare <8 x float> @llvm.copysign.v8f32(<8 x float> %Mag, <8 x float> %Sgn)
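llvm.copysign.v8f32 produces, per lane, the magnitude of the first operand with the sign bit of the second. A minimal sketch with an invented wrapper name:

    define <8 x float> @copysign_v8f32(<8 x float> %mag, <8 x float> %sgn) {
      ; |%mag| combined with the sign bit of %sgn, lane by lane
      %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %mag, <8 x float> %sgn)
      ret <8 x float> %r
    }
    declare <8 x float> @llvm.copysign.v8f32(<8 x float>, <8 x float>)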
|
D | vec-copysign.ll |
    70 define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
    71 ; SSE2-LABEL: v8f32:
    83 ; AVX-LABEL: v8f32:
    90 %tmp = tail call <8 x float> @llvm.copysign.v8f32( <8 x float> %a, <8 x float> %b )
    166 declare <8 x float> @llvm.copysign.v8f32(<8 x float> %Mag, <8 x float> %Sgn)
|
D | fnabs.ll |
    54 %fabs = tail call <8 x float> @llvm.fabs.v8f32(< 8 x float> %a) #1
    65 %fabs = tail call <8 x float> @llvm.fabs.v8f32(<8 x float> %a) #1
    72 declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
|
D | fma4-intrinsics-x86.ll |
    67 %1 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
    107 %2 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %1)
    148 %2 = call <8 x float> @llvm.fma.v8f32(<8 x float> %1, <8 x float> %a1, <8 x float> %a2)
    192 %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %1, <8 x float> %a1, <8 x float> %2)
    237 %1 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
    239 %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %2)
    286 %1 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
    288 %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %2)
    307 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #2
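The FMA4 test variants are all built around the core llvm.fma.v8f32 intrinsic, a strictly fused a*b + c with a single rounding; the fmsub/fnmadd/fnmsub cases feed it operands that were negated beforehand. A minimal sketch with an invented wrapper name:

    define <8 x float> @fma_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
      ; fused multiply-add: a*b + c evaluated with one rounding step per lane
      %r = call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
      ret <8 x float> %r
    }
    declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)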
|
D | sqrt-fastmath-tune.ll |
    11 declare <8 x float> @llvm.sqrt.v8f32(<8 x float>) #0
    52 %call = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %f) #1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | fround.ll |
    20 …estimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    31 … estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    42 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    53 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    64 … estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    75 … estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    86 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    96 %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
    111 …stimated cost of 86 for instruction: %V8F32 = call <8 x float> @llvm.floor.v8f32(<8 x float> undef)
    122 …estimated cost of 4 for instruction: %V8F32 = call <8 x float> @llvm.floor.v8f32(<8 x float> undef)
    [all …]
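Each prefix here is a different subtarget run of the cost-model printer, so the same llvm.ceil.v8f32 call is assigned different costs (86, 4, or 1 in the lines above) depending on which vector rounding instructions are available. A sketch of how such a test is typically driven; the RUN line, triple, and attribute below are assumptions for illustration, not copied from fround.ll:

    ; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 -cost-model -analyze | FileCheck %s
    ; CHECK: Found an estimated cost of {{[0-9]+}} for instruction: %V8F32 = call <8 x float> @llvm.ceil.v8f32
    define void @ceil_cost() {
      %V8F32 = call <8 x float> @llvm.ceil.v8f32(<8 x float> undef)
      ret void
    }
    declare <8 x float> @llvm.ceil.v8f32(<8 x float>)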
|
D | arith-fma.ll |
    12 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef,…
    23 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef,…
    34 … estimated cost of 1 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef,…
    44 %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
    57 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
|
/external/llvm/test/CodeGen/X86/ |
D | vec_floor.ll |
    62 %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    65 declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    124 %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    127 declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    186 %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    189 declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    248 %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
    251 declare <8 x float> @llvm.rint.v8f32(<8 x float> %p)
    310 %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
    313 declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
|
D | fnabs.ll |
    54 %fabs = tail call <8 x float> @llvm.fabs.v8f32(< 8 x float> %a) #1
    65 %fabs = tail call <8 x float> @llvm.fabs.v8f32(<8 x float> %a) #1
    72 declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    446 {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps in getShuffleCost()
    539 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, in getCastInstrCost()
    544 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, in getCastInstrCost()
    554 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, in getCastInstrCost()
    556 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, in getCastInstrCost()
    583 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, in getCastInstrCost()
    590 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, in getCastInstrCost()
    595 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, in getCastInstrCost()
    602 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, in getCastInstrCost()
    612 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, in getCastInstrCost()
    [all …]
|
D | X86InstrFMA.td |
    94 loadv8f32, X86Fmadd, v4f32, v8f32>;
    96 loadv8f32, X86Fmsub, v4f32, v8f32>;
    99 v4f32, v8f32>;
    102 v4f32, v8f32>;
    121 loadv8f32, X86Fnmadd, v4f32, v8f32>;
    123 loadv8f32, X86Fnmsub, v4f32, v8f32>;
    397 defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
    399 defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
    401 defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
    403 defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
    [all …]
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
    113 v8f32 = 58, // 8 x f32 enumerator
    257 return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 || in is256BitVector()
    362 case v8f32: in getVectorElementType()
    401 case v8f32: in getVectorNumElements()
    494 case v8f32: in getSizeInBits()
    652 if (NumElements == 8) return MVT::v8f32; in getVectorVT()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/X86/ |
D | x86-avx.ll |
    9 ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.floor.v8f32(<8 x float> [[A:%.*]])
    18 ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.ceil.v8f32(<8 x float> [[A:%.*]])
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/ |
D | MachineValueType.h |
    155 v8f32 = 91, // 8 x f32 enumerator
    361 return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 || in is256BitVector()
    500 case v8f32: in getVectorElementType()
    560 case v8f32: in getVectorNumElements()
    719 case v8f32: in getSizeInBits()
    893 if (NumElements == 8) return MVT::v8f32; in getVectorVT()
|
/external/clang/test/CodeGen/ |
D | x86_64-arguments.c |
    194 typedef float v8f32 __attribute__((__vector_size__(32))); typedef
    196 v8f32 v;
    205 v8f32 v[1];
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.round.ll |
    56 %result = call <8 x float> @llvm.round.v8f32(<8 x float> %in) #1
    64 declare <8 x float> @llvm.round.v8f32(<8 x float>) #1
|