/external/llvm/test/CodeGen/X86/ |
D | avx512-round.ll |
    14   %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
    17   declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    30   %res = call <8 x double> @llvm.ceil.v8f64(<8 x double> %a)
    33   declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
    46   %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %a)
    49   declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
    62   %res = call <8 x double> @llvm.rint.v8f64(<8 x double> %a)
    65   declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
    78   %res = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %a)
    81   declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
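Note: the pattern these tests exercise, as a minimal standalone module (the function name here is illustrative; the intrinsic signature is taken from the matches above). With -mattr=+avx512f each of these rounding intrinsics should lower to a single vrndscalepd:

    define <8 x double> @floor_v8f64(<8 x double> %a) {
      %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
      ret <8 x double> %res
    }
    declare <8 x double> @llvm.floor.v8f64(<8 x double>)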
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrVecCompiler.td |
    87   def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
    88   def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
    89   def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
    90   def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
    91   def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
    96   def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
    100  def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
    106  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
    110  def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
    115  def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
    [all …]
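Note: these patterns make 512-bit bitcasts free; a bitconvert between any two VR512 (ZMM) types selects to a plain register reuse, with no instruction emitted. The equivalent IR is a sketch like (value names made up):

    define <8 x double> @cast(<8 x i64> %vi) {
      ; Selects to nothing: the ZMM register is simply reinterpreted.
      %pd = bitcast <8 x i64> %vi to <8 x double>
      ret <8 x double> %pd
    }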
|
D | X86TargetTransformInfo.cpp |
    490   { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ in getArithmeticInstrCost()
    491   { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ in getArithmeticInstrCost()
    492   { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ in getArithmeticInstrCost()
    900   { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd in getShuffleCost()
    905   { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd in getShuffleCost()
    910   { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd in getShuffleCost()
    924   { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd in getShuffleCost()
    1154  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, in getCastInstrCost()
    1161  { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, in getCastInstrCost()
    1168  { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, in getCastInstrCost()
    [all …]
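Note: each table row is {operation, destination type[, source type], cost}, where the cost approximates throughput in instructions; a cost of 1 means the operation maps to a single AVX-512 instruction. The SK_Broadcast row, for example, covers a splat shuffle (a sketch; the function name is made up):

    define <8 x double> @splat0(<8 x double> %v) {
      ; SK_Broadcast: lane 0 replicated into all 8 lanes; costed as one
      ; broadcast instruction in the table above.
      %b = shufflevector <8 x double> %v, <8 x double> undef, <8 x i32> zeroinitializer
      ret <8 x double> %b
    }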
|
D | X86CallingConv.td |
    121  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    151  CCIfType<[v16i32, v8i64, v16f32, v8f64], CCAssignToStack<64, 64>>
    195  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    248  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    303  CCIfType<[v16f32, v8f64, v16i32, v8i64],
    550  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    570  CCIfType<[v16i32, v8i64, v16f32, v8f64],
    613  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
    673  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    731  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    [all …]
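Note: these rules classify all 512-bit vector types together. CCAssignToStack<64, 64> assigns the value to a 64-byte stack slot at 64-byte alignment, and CCPassIndirect<i64> passes an i64 pointer to a temporary instead of the value itself. At the IR level a v8f64 argument is simply (sketch; with AVX-512 enabled it would normally travel in %zmm0):

    define <8 x double> @callee(<8 x double> %x) {
      ret <8 x double> %x
    }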
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vmaskmov-offset.ll |
    24  declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
    25  declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
    39  …%masked_loaded_vec = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* nonnull %stac…
    40  …call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %masked_loaded_vec, <8 x double>* nonnull …
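Note: the two call sites above are truncated by the index; a complete call following the declared signatures takes pointer, alignment, mask, and passthru operands, e.g. (value names made up; pair with the two declarations above):

    define void @copy_masked(<8 x double>* %p, <8 x double>* %q, <8 x i1> %mask) {
      %v = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %p, i32 8, <8 x i1> %mask, <8 x double> undef)
      call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %v, <8 x double>* %q, i32 8, <8 x i1> %mask)
      ret void
    }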
|
D | vec-copysign-avx512.ll |
    88   define <8 x double> @v8f64(<8 x double> %a, <8 x double> %b) nounwind {
    89   ; AVX512VL-LABEL: v8f64:
    96   ; AVX512VLDQ-LABEL: v8f64:
    102  %tmp = tail call <8 x double> @llvm.copysign.v8f64( <8 x double> %a, <8 x double> %b )
    111  declare <8 x double> @llvm.copysign.v8f64(<8 x double> %Mag, <8 x double> %Sgn)
|
D | avx512-rndscale.ll |
    8    declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    14   declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
    20   declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
    26   declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
    32   declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
    76   %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    135  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    209  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    282  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    359  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    [all …]
|
D | bitcast-setcc-512.ll |
    359  define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
    360  ; SSE-LABEL: v8f64:
    374  ; AVX1-LABEL: v8f64:
    388  ; AVX2-LABEL: v8f64:
    399  ; AVX512F-LABEL: v8f64:
    407  ; AVX512BW-LABEL: v8f64:
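Note: the function behind these labels compares two v8f64 vectors and packs the <8 x i1> result into an i8 bitmask; on AVX-512 the compare writes a mask register directly. The IR shape is roughly (the fcmp predicate here is illustrative):

    define i8 @cmp_to_mask(<8 x double> %a, <8 x double> %b) {
      %cmp = fcmp ogt <8 x double> %a, %b
      %res = bitcast <8 x i1> %cmp to i8
      ret i8 %res
    }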
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
    118  v8f64 = 63, // 8 x f64 enumerator
    121  LAST_FP_VECTOR_VALUETYPE = v8f64,
    124  LAST_VECTOR_VALUETYPE = v8f64,
    264  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 || in is512BitVector()
    367  case v8f64: return f64; in getVectorElementType()
    402  case v8f64: return 8; in getVectorNumElements()
    502  case v8f64: return 512; in getSizeInBits()
    659  if (NumElements == 8) return MVT::v8f64; in getVectorVT()
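Note: the enumerator value is 63 here but 96 in the LLVM 7.0 copy of this header further down; the raw value shifts as MVTs are added between releases, so only the symbolic name MVT::v8f64 is stable. The accessors above pin the type down: element type f64, 8 elements, 512 bits, i.e. the machine type of an IR value like this (sketch):

    define <8 x double> @sum(<8 x double> %a, <8 x double> %b) {
      ; 8 x f64 = 512 bits: a single ZMM register on AVX-512 targets.
      %r = fadd <8 x double> %a, %b
      ret <8 x double> %r
    }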
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | fround.ll |
    25   …timated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    36   …stimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    47   …stimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    58   …stimated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    69   …stimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    80   …stimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    91   …stimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    102  %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
    116  …imated cost of 84 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    127  …timated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.floor.v8f64(<8 x double> undef)
    [all …]
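Note: the per-subtarget spread of costs (84 down to 1) presumably reflects scalarization on older subtargets versus a single vrndscalepd on AVX-512. These tests are driven by the cost-model printer; a typical setup would look like this (the RUN line, triple, and function name are illustrative, not copied from the test):

    ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s
    define void @ceil_cost() {
      ; CHECK: cost of 1 for instruction: %V8F64
      %V8F64 = call <8 x double> @llvm.ceil.v8f64(<8 x double> undef)
      ret void
    }
    declare <8 x double> @llvm.ceil.v8f64(<8 x double>)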
|
D | arith-fma.ll |
    17  …estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
    28  …estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
    39  …estimated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef…
    50  …%V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> un…
    63  declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
|
D | arith-fp.ll |
    513  …imated cost of 128 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    524  …imated cost of 128 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    535  …timated cost of 86 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    546  …timated cost of 56 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    557  …stimated cost of 1 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    568  …imated cost of 280 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    579  …imated cost of 268 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    590  …timated cost of 86 for instruction: %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    601  %V8F64 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> undef)
    615  …stimated cost of 8 for instruction: %V8F64 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef)
    [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    540  { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, in getCastInstrCost()
    547  { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, in getCastInstrCost()
    554  { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, in getCastInstrCost()
    555  { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, in getCastInstrCost()
    556  { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, in getCastInstrCost()
    575  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, in getCastInstrCost()
    577  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, in getCastInstrCost()
    579  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, in getCastInstrCost()
    582  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, in getCastInstrCost()
    584  { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, in getCastInstrCost()
    [all …]
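Note: UINT_TO_FP v8i64→v8f64 appears twice with different costs (1 and 26) because getCastInstrCost keeps one table per subtarget: with AVX-512DQ the conversion is presumably a single vcvtuqq2pd, while plain AVX-512F needs a multi-instruction expansion. The costed conversions correspond to IR like (sketch, value names made up):

    define <8 x double> @convert(<8 x i64> %i, <8 x float> %f) {
      %d = uitofp <8 x i64> %i to <8 x double>    ; cost 1 with AVX-512DQ, 26 without
      %e = fpext <8 x float> %f to <8 x double>   ; cost 1: one conversion instruction
      %r = fadd <8 x double> %d, %e
      ret <8 x double> %r
    }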
|
D | X86CallingConv.td |
    68   CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    122  CCIfType<[v16f32, v8f64, v16i32, v8i64],
    149  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    346  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    366  CCIfType<[v16i32, v8i64, v16f32, v8f64],
    406  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
    449  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    524  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    541  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    560  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/ |
D | MachineValueType.h |
    160  v8f64 = 96, // 8 x f64 enumerator
    368  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 || in is512BitVector()
    510  case v8f64: in getVectorElementType()
    561  case v8f64: in getVectorNumElements()
    733  case v8f64: in getSizeInBits()
    900  if (NumElements == 8) return MVT::v8f64; in getVectorVT()
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.round.f64.ll |
    58  %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
    68  declare <8 x double> @llvm.round.v8f64(<8 x double>) #1
|
D | fmaxnum.f64.ll |
    7   declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) #0
|
D | fminnum.f64.ll |
    7   declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) #0
|
D | fceil64.ll |
    9   declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
    78  %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
|
D | ftrunc.f64.ll |
    9   declare <8 x double> @llvm.trunc.v8f64(<8 x double>) nounwind readnone
    85  %y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | llvm.round.f64.ll |
    58  %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
    68  declare <8 x double> @llvm.round.v8f64(<8 x double>) #1
|
D | fmaxnum.f64.ll |
    7   declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) #0
|
D | fminnum.f64.ll |
    7   declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) #0
|
D | ftrunc.f64.ll |
    9   declare <8 x double> @llvm.trunc.v8f64(<8 x double>) nounwind readnone
    85  %y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
|
D | fceil64.ll |
    9   declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
    80  %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
|