/external/llvm-project/llvm/test/CodeGen/X86/
D | vec-strict-512.ll |
      5  declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata…
      7  declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata…
      9  declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata…
     11  declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata…
     13  declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
     15  declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata)
     16  declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, meta…
     17  declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x doub…
     20  declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata)
     22  declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata)
    [all …]

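For context, the truncated declarations above all follow the same shape. A complete call site, sketched below with an illustrative function name, passes the rounding mode and exception behavior as trailing metadata operands and must be marked strictfp:

    declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)

    ; Illustrative caller: the rounding mode and FP-exception semantics are
    ; the trailing metadata operands; strictfp is required on the function
    ; and, by convention in the tests, on the call as well.
    define <8 x double> @strict_fadd(<8 x double> %a, <8 x double> %b) #0 {
      %r = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(
                  <8 x double> %a, <8 x double> %b,
                  metadata !"round.dynamic", metadata !"fpexcept.strict") #0
      ret <8 x double> %r
    }

    attributes #0 = { strictfp }
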
D | fma-commute-loop.ll |
     89  …%tmp38 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp24, <8 x double> %tmp37, <8 x doub…
     90  …%tmp39 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp28, <8 x double> %tmp37, <8 x doub…
     91  …%tmp40 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp32, <8 x double> %tmp37, <8 x doub…
     97  …%tmp46 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp24, <8 x double> %tmp45, <8 x doub…
     98  …%tmp47 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp28, <8 x double> %tmp45, <8 x doub…
     99  …%tmp48 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %tmp32, <8 x double> %tmp45, <8 x doub…
    114  declare <8 x double> @llvm.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>)

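llvm.fmuladd is the relaxed multiply-add: the backend may emit a fused FMA when that is at least as fast as a separate multiply and add, which is what lets the test above exercise operand commutation into the vfmadd forms. A minimal sketch (function name illustrative):

    declare <8 x double> @llvm.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>)

    ; Computes a*b + c; the target is free to fuse this into a single FMA.
    define <8 x double> @mul_add(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
      %r = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c)
      ret <8 x double> %r
    }
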
D | vec-copysign-avx512.ll |
     50  define <8 x double> @v8f64(<8 x double> %a, <8 x double> %b) nounwind {
     51  ; CHECK-LABEL: v8f64:
     55  %tmp = tail call <8 x double> @llvm.copysign.v8f64( <8 x double> %a, <8 x double> %b )
     64  declare <8 x double> @llvm.copysign.v8f64(<8 x double> %Mag, <8 x double> %Sgn)

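A minimal standalone version of the pattern this test checks; each result lane is the magnitude operand with the sign bit taken from the sign operand, which AVX512 can typically lower to a single bitwise select (e.g. via vpternlog):

    declare <8 x double> @llvm.copysign.v8f64(<8 x double>, <8 x double>)

    ; Per lane: |%mag| with the sign bit of %sgn.
    define <8 x double> @copysign8(<8 x double> %mag, <8 x double> %sgn) {
      %r = call <8 x double> @llvm.copysign.v8f64(<8 x double> %mag, <8 x double> %sgn)
      ret <8 x double> %r
    }
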
D | vmaskmov-offset.ll |
     30  declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
     31  declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
     51  …%masked_loaded_vec = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* nonnull %stac…
     52  …call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %masked_loaded_vec, <8 x double>* nonnull …

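In these intrinsics the i32 operand is the alignment, and the trailing <8 x double> of the load is the passthru value substituted for masked-off lanes. A self-contained sketch in the same typed-pointer style as the test (names and alignment illustrative):

    declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
    declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)

    ; Copies only the lanes whose mask bit is set; disabled load lanes
    ; receive the passthru value (here zero).
    define void @masked_copy(<8 x double>* %src, <8 x double>* %dst, <8 x i1> %mask) {
      %v = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %src, i32 8, <8 x i1> %mask, <8 x double> zeroinitializer)
      call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %v, <8 x double>* %dst, i32 8, <8 x i1> %mask)
      ret void
    }
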
D | vec-strict-inttofp-512.ll |
     15  declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
     16  declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
     17  declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
     18  declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
     19  declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadat…
     20  declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadat…
     21  declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadat…
     22  declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadat…
     23  declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadat…
     24  declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadat…
    [all …]

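Int-to-fp conversion can be inexact for wide sources (e.g. v8i64 into f64), so these variants carry both a rounding mode and an exception behavior. An illustrative call to the v8i32 variant, with the same strictfp conventions as above:

    declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)

    define <8 x double> @strict_sitofp(<8 x i32> %x) #0 {
      %r = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(
                  <8 x i32> %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
      ret <8 x double> %r
    }

    attributes #0 = { strictfp }
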
D | vec-strict-fptoint-512.ll |
      7  declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata)
      8  declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata)
     11  declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata)
     12  declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double>, metadata)
     13  declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double>, metadata)
     14  declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double>, metadata)
     15  declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double>, metadata)
     16  declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double>, metadata)
     17  declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double>, metadata)
     18  declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double>, metadata)
    [all …]

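Note the single metadata operand here, in contrast to the int-to-fp variants: fp-to-int always truncates toward zero, so only the exception behavior is constrained. A sketch:

    declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata)

    ; No rounding-mode operand; truncation toward zero is implied.
    define <8 x i32> @strict_fptosi(<8 x double> %x) #0 {
      %r = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(
                  <8 x double> %x, metadata !"fpexcept.strict") #0
      ret <8 x i32> %r
    }

    attributes #0 = { strictfp }
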
D | vec-strict-cmp-512.ll |
    373  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    398  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    423  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    448  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    473  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    498  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    523  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    548  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    573  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    598  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
    [all …]

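The operands are cut off above; in the constrained fcmp the comparison predicate itself is the first metadata operand (this is the quiet form; the signaling form is fcmps). An illustrative "olt" comparison:

    declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(<8 x double>, <8 x double>, metadata, metadata)

    define <8 x i1> @strict_cmp_olt(<8 x double> %a, <8 x double> %b) #0 {
      %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                  <8 x double> %a, <8 x double> %b,
                  metadata !"olt", metadata !"fpexcept.strict") #0
      ret <8 x i1> %cond
    }

    attributes #0 = { strictfp }
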
/external/llvm/test/CodeGen/X86/
D | avx512-round.ll |
     14  %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
     17  declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
     30  %res = call <8 x double> @llvm.ceil.v8f64(<8 x double> %a)
     33  declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
     46  %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %a)
     49  declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
     62  %res = call <8 x double> @llvm.rint.v8f64(<8 x double> %a)
     65  declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
     78  %res = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %a)
     81  declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)

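All five intrinsics round to integral values, differing only in direction and in whether the inexact exception may be raised; on AVX512 they are generally selected to vrndscalepd with different immediates. A representative caller for one of them (the others differ only in the intrinsic name):

    declare <8 x double> @llvm.floor.v8f64(<8 x double>)

    ; Rounds each lane toward negative infinity.
    define <8 x double> @floor8(<8 x double> %a) {
      %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
      ret <8 x double> %res
    }
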
/external/llvm/include/llvm/CodeGen/
D | MachineValueType.h |
    118  v8f64 = 63,   // 8 x f64                                        enumerator
    121  LAST_FP_VECTOR_VALUETYPE = v8f64,
    124  LAST_VECTOR_VALUETYPE = v8f64,
    264  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 ||    in is512BitVector()
    367  case v8f64: return f64;                                         in getVectorElementType()
    402  case v8f64: return 8;                                           in getVectorNumElements()
    502  case v8f64: return 512;                                         in getSizeInBits()
    659  if (NumElements == 8) return MVT::v8f64;                        in getVectorVT()

/external/llvm-project/llvm/test/Analysis/CostModel/X86/
D | fround.ll |
     25  …timated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
     36  …stimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
     47  …stimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
     58  …stimated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
     69  %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
     83  …imated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
     94  …timated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    105  …timated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    116  …timated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    127  %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    [all …]

D | arith-fma.ll |
     17  …estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
     28  …estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
     39  …estimated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
     50  …%V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> un…
     63  declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)

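Unlike llvm.fmuladd earlier in this listing, llvm.fma mandates the single-rounding fused result regardless of target support, which is what these rows are pricing. A minimal sketch of the call being costed:

    declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)

    ; a*b + c with exactly one rounding step; the backend may not split
    ; this into a separate multiply and add.
    define <8 x double> @fma8(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
      %r = call <8 x double> @llvm.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c)
      ret <8 x double> %r
    }
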
D | fmaxnum-size-latency.ll |
     35  …mated cost of 16 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     43  …imated cost of 6 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     50  %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
     64  declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)

D | fminnum-size-latency.ll |
     35  …mated cost of 16 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     43  …imated cost of 6 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     50  %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
     64  declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

D | fminnum.ll |
     59  …mated cost of 16 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     67  …mated cost of 10 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     75  …imated cost of 6 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     83  …imated cost of 2 for instruction: %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
     90  %V8f64 = call <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
    133  …d cost of 4 for instruction: %V8f64 = call nnan <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
    141  …d cost of 2 for instruction: %V8f64 = call nnan <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
    149  …d cost of 1 for instruction: %V8f64 = call nnan <8 x double> @llvm.minnum.v8f64(<8 x double> undef…
    156  %V8f64 = call nnan <8 x double> @llvm.minnum.v8f64(<8 x double> undef, <8 x double> undef)
    170  declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

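The cheaper second block of costs comes from the nnan flag: IEEE minNum must return the non-NaN operand when the other input is a quiet NaN, which needs fixup code around the plain vector-min instruction, while nnan lets that handling drop away. Both forms, sketched (function names illustrative):

    declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

    define <8 x double> @min_default(<8 x double> %a, <8 x double> %b) {
      ; Full minNum semantics: NaN operands are dropped in favor of numbers,
      ; costing extra compare/select fixups (the higher costs above).
      %r = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b)
      ret <8 x double> %r
    }

    define <8 x double> @min_nnan(<8 x double> %a, <8 x double> %b) {
      ; With nnan the NaN handling is dead, so a bare vector min suffices
      ; (the lower costs in the second block above).
      %r = call nnan <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b)
      ret <8 x double> %r
    }
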
D | fmaxnum.ll |
     59  …mated cost of 16 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     67  …mated cost of 10 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     75  …imated cost of 6 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     83  …imated cost of 2 for instruction: %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
     90  %V8f64 = call <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
    133  …d cost of 4 for instruction: %V8f64 = call nnan <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
    141  …d cost of 2 for instruction: %V8f64 = call nnan <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
    149  …d cost of 1 for instruction: %V8f64 = call nnan <8 x double> @llvm.maxnum.v8f64(<8 x double> undef…
    156  %V8f64 = call nnan <8 x double> @llvm.maxnum.v8f64(<8 x double> undef, <8 x double> undef)
    170  declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)

D | reduce-fmax.ll |
     17  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmax.v8f64(<8 x double> undef)
     25  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmax.v8f64(<8 x double> undef)
     33  …ted cost of 6 for instruction: %V8 = call double @llvm.vector.reduce.fmax.v8f64(<8 x double> undef)
     40  %V8 = call double @llvm.vector.reduce.fmax.v8f64(<8 x double> undef)
     85  declare double @llvm.vector.reduce.fmax.v8f64(<8 x double>)

D | reduce-fmin.ll |
     17  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmin.v8f64(<8 x double> undef)
     25  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmin.v8f64(<8 x double> undef)
     33  …ted cost of 6 for instruction: %V8 = call double @llvm.vector.reduce.fmin.v8f64(<8 x double> undef)
     40  %V8 = call double @llvm.vector.reduce.fmin.v8f64(<8 x double> undef)
     85  declare double @llvm.vector.reduce.fmin.v8f64(<8 x double>)

D | reduce-fadd.ll |
     17  …ted cost of 8 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     25  …ted cost of 8 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     33  …ted cost of 8 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     41  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     49  …ted cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     57  …ted cost of 4 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     65  …ted cost of 6 for instruction: %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x …
     72  %V8 = call double @llvm.vector.reduce.fadd.v8f64(double %arg, <8 x double> undef)
    153  declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)

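The fadd reduction is the odd one out among these three: it takes a scalar start value, and it is strictly ordered unless reassoc is set. A sketch of both shapes (function name illustrative):

    declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)
    declare double @llvm.vector.reduce.fmax.v8f64(<8 x double>)

    define double @reduce_both(double %start, <8 x double> %v) {
      ; The scalar start value is folded into the sum; reassoc permits a
      ; tree reduction instead of a strictly ordered one.
      %s = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %start, <8 x double> %v)
      ; min/max reductions take no start value.
      %m = call double @llvm.vector.reduce.fmax.v8f64(<8 x double> %v)
      %r = fadd double %s, %m
      ret double %r
    }
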
/external/llvm-project/llvm/test/Transforms/LowerMatrixIntrinsics/
D | remarks-shared-subtrees.ll |
     90  …%shared.load = tail call <8 x double> @llvm.matrix.column.major.load.v8f64.p0f64(double* %arg1, i6…
     92  …%tmp17 = tail call <8 x double> @llvm.matrix.transpose.v8f64(<8 x double> %shared.load, i32 2, i32…
     93  …tail call void @llvm.matrix.column.major.store.v8f64.p0f64(<8 x double> %tmp17, double* %arg3, i64…
     95  …%tmp48 = tail call <60 x double> @llvm.matrix.multiply.v60f64.v8f64.v30f64(<8 x double> %tmp17, <3…
    101  declare <8 x double> @llvm.matrix.transpose.v8f64(<8 x double>, i32 immarg, i32 immarg)
    102  declare <8 x double> @llvm.matrix.column.major.load.v8f64.p0f64(double*, i64, i1 immarg, i32 immarg…
    106  declare void @llvm.matrix.column.major.store.v8f64.p0f64(<8 x double>, double* writeonly, i64, i1 i…
    107  declare <60 x double> @llvm.matrix.multiply.v60f64.v8f64.v30f64(<8 x double>, <30 x double>, i32 im…

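The matrix intrinsics carry their shapes as immediate operands. A self-contained sketch, assuming an illustrative 2 x 4 shape for the v8f64 operand (the exact shapes in the test are truncated above): the load's i64 operand is the column stride in elements, the i1 is the volatile flag, and the transpose is given the input's row and column counts:

    declare <8 x double> @llvm.matrix.column.major.load.v8f64.p0f64(double*, i64, i1 immarg, i32 immarg, i32 immarg)
    declare <8 x double> @llvm.matrix.transpose.v8f64(<8 x double>, i32 immarg, i32 immarg)

    ; Loads a 2x4 column-major matrix whose columns start %stride elements
    ; apart (%stride must be >= the row count), then transposes it to 4x2.
    define <8 x double> @load_transpose(double* %p, i64 %stride) {
      %m = call <8 x double> @llvm.matrix.column.major.load.v8f64.p0f64(double* %p, i64 %stride, i1 false, i32 2, i32 4)
      %t = call <8 x double> @llvm.matrix.transpose.v8f64(<8 x double> %m, i32 2, i32 4)
      ret <8 x double> %t
    }
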
D | strided-load-double.ll |
     46  declare <8 x double> @llvm.matrix.column.major.load.v8f64(double*, i64, i1, i32, i32)
     47  ; CHECK: declare <8 x double> @llvm.matrix.column.major.load.v8f64(double* nocapture, i64, i1 immar…
     64  …%load = call <8 x double> @llvm.matrix.column.major.load.v8f64(double* %in, i64 %stride, i1 false,…

/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/
D | MachineValueType.h |
    142  v8f64 = 87,   // 8 x f64                                        enumerator
    145  LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
    148  LAST_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
    368  SimpleTy == MVT::v8f64 || SimpleTy == MVT::v512i1 ||            in is512BitVector()
    539  case v8f64:                                                     in getVectorElementType()
    605  case v8f64:                                                     in getVectorNumElements()
    797  case v8f64: return TypeSize::Fixed(512);                        in getSizeInBits()
   1012  if (NumElements == 8) return MVT::v8f64;                        in getVectorVT()

/external/llvm/lib/Target/X86/
D | X86TargetTransformInfo.cpp |
    540  { ISD::UINT_TO_FP,  MVT::v8f64, MVT::v8i64,  1 },    in getCastInstrCost()
    547  { ISD::FP_TO_UINT,  MVT::v8i64, MVT::v8f64,  1 },    in getCastInstrCost()
    554  { ISD::FP_EXTEND,   MVT::v8f64, MVT::v8f32,  1 },    in getCastInstrCost()
    555  { ISD::FP_EXTEND,   MVT::v8f64, MVT::v16f32, 3 },    in getCastInstrCost()
    556  { ISD::FP_ROUND,    MVT::v8f32, MVT::v8f64,  1 },    in getCastInstrCost()
    575  { ISD::SINT_TO_FP,  MVT::v8f64, MVT::v8i1,   4 },    in getCastInstrCost()
    577  { ISD::SINT_TO_FP,  MVT::v8f64, MVT::v8i8,   2 },    in getCastInstrCost()
    579  { ISD::SINT_TO_FP,  MVT::v8f64, MVT::v8i16,  2 },    in getCastInstrCost()
    582  { ISD::SINT_TO_FP,  MVT::v8f64, MVT::v8i32,  1 },    in getCastInstrCost()
    584  { ISD::UINT_TO_FP,  MVT::v8f64, MVT::v8i64, 26 },    in getCastInstrCost()
    [all …]

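Each row of this table keys on an ISD opcode plus destination and source MVTs. In IR terms, the two UINT_TO_FP v8i64 to v8f64 rows (cost 1 vs 26) price the cast below; plausibly the cheap row covers subtargets with direct quadword converts (AVX512DQ's vcvtuqq2pd) and the expensive row their scalarized expansion, though that reading is an inference from the listing:

    ; The IR whose lowering the table rows above are modeling.
    define <8 x double> @cast(<8 x i64> %x) {
      %r = uitofp <8 x i64> %x to <8 x double>
      ret <8 x double> %r
    }
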
/external/llvm/test/CodeGen/AMDGPU/
D | llvm.round.f64.ll |
     58  %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
     68  declare <8 x double> @llvm.round.v8f64(<8 x double>) #1

D | fminnum.f64.ll |
      7  declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) #0
     48  %val = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) #0

D | fmaxnum.f64.ll |
      7  declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) #0
     48  %val = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) #0