/external/llvm/test/CodeGen/X86/
D | avx512-round.ll
    14  %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
    17  declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
    30  %res = call <8 x double> @llvm.ceil.v8f64(<8 x double> %a)
    33  declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
    46  %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %a)
    49  declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
    62  %res = call <8 x double> @llvm.rint.v8f64(<8 x double> %a)
    65  declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
    78  %res = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %a)
    81  declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
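These tests exercise the five standard rounding intrinsics on a full 512-bit vector. A minimal sketch of how such a call looks in IR, using the declarations shown above (the function name is illustrative, not from the test):

    ; Rounds each of the 8 doubles toward negative infinity; with AVX-512
    ; this can select to a single vrndscalepd on a zmm register.
    define <8 x double> @floor_example(<8 x double> %a) {
      %r = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
      ret <8 x double> %r
    }
    declare <8 x double> @llvm.floor.v8f64(<8 x double>)

The other four intrinsics (ceil, trunc, rint, nearbyint) take the same shape, differing only in rounding semantics.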
D | fminnum.ll
    15   declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
    105  %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
D | fmaxnum.ll
    15   declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
    282  %z = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
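llvm.minnum/llvm.maxnum follow the IEEE-754 minNum/maxNum (libm fmin/fmax) rule: if exactly one operand is NaN, the other operand is returned. A hedged sketch of an elementwise clamp built from the two intrinsics (names illustrative):

    ; Clamps each lane of %v into [%lo, %hi] using the NaN-aware
    ; min/max intrinsics declared in the tests above.
    define <8 x double> @clamp_v8f64(<8 x double> %v, <8 x double> %lo, <8 x double> %hi) {
      %t = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %v, <8 x double> %lo)
      %r = call <8 x double> @llvm.minnum.v8f64(<8 x double> %t, <8 x double> %hi)
      ret <8 x double> %r
    }
    declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
    declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)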
D | masked_memop.ll
    79   …%res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1>%mask, <8 x d…
    343  declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
    346  declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
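In these signatures the i32 operand is the alignment, the <8 x i1> operand is the per-lane mask, and the trailing vector is the pass-through value for masked-off lanes. A minimal sketch using exactly the declarations above (function and value names are illustrative):

    ; Loads only the lanes whose mask bit is set; masked-off lanes take
    ; their value from %passthru. The store writes back only active lanes.
    define void @masked_copy(<8 x double>* %dst, <8 x double>* %src,
                             <8 x i1> %mask, <8 x double> %passthru) {
      %v = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %src, i32 8,
                                                     <8 x i1> %mask, <8 x double> %passthru)
      call void @llvm.masked.store.v8f64(<8 x double> %v, <8 x double>* %dst,
                                         i32 8, <8 x i1> %mask)
      ret void
    }
    declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
    declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)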
D | avx512-arith.ll
    446  declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
    452  %b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
    862  %t = call <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
    865  declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
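A short sketch combining the two intrinsics tested here (name illustrative):

    ; Computes sqrt(|x|) per lane. With AVX-512, fabs typically lowers to a
    ; bitwise AND that clears each lane's sign bit (the Custom FABS lowering
    ; listed under X86ISelLowering.cpp below), and sqrt selects to vsqrtpd.
    define <8 x double> @sqrt_abs(<8 x double> %x) {
      %a = call <8 x double> @llvm.fabs.v8f64(<8 x double> %x)
      %r = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
      ret <8 x double> %r
    }
    declare <8 x double> @llvm.fabs.v8f64(<8 x double>)
    declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)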
/external/llvm/include/llvm/CodeGen/
D | MachineValueType.h
    118  v8f64 = 63, // 8 x f64 enumerator
    121  LAST_FP_VECTOR_VALUETYPE = v8f64,
    124  LAST_VECTOR_VALUETYPE = v8f64,
    257  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 || in is512BitVector()
    360  case v8f64: return f64; in getVectorElementType()
    395  case v8f64: return 8; in getVectorNumElements()
    495  case v8f64: return 512; in getSizeInBits()
    652  if (NumElements == 8) return MVT::v8f64; in getVectorVT()
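Taken together, these accessors pin down what v8f64 is: element type f64, 8 elements, 512 bits total, i.e. the machine value type that the IR type <8 x double> becomes during selection on targets where it is natively supported. A trivial sketch of IR carrying that type:

    ; %v has IR type <8 x double>; on an AVX-512-capable subtarget it is
    ; carried through instruction selection as MVT::v8f64 (8 x f64, 512 bits).
    define <8 x double> @passthrough(<8 x double> %v) {
      ret <8 x double> %v
    }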
D | ValueTypes.td
    92  def v8f64 : ValueType<512, 63>; // 8 x f64 vector value
/external/llvm/lib/Target/X86/
D | X86TargetTransformInfo.cpp
    537  { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, in getCastInstrCost()
    544  { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, in getCastInstrCost()
    551  { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, in getCastInstrCost()
    552  { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, in getCastInstrCost()
    553  { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, in getCastInstrCost()
    577  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, in getCastInstrCost()
    578  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, in getCastInstrCost()
    579  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, in getCastInstrCost()
    580  { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, in getCastInstrCost()
    588  { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, in getCastInstrCost()
    [all …]
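Each row in this cost table is {opcode, destination type, source type, cost}. For example, the SINT_TO_FP row at line 580 says converting <8 x i32> to <8 x double> costs one instruction on AVX-512 (it can select to a single vcvtdq2pd). A sketch of IR that hits that row (name illustrative):

    ; Cost-model row at line 580: one instruction for the whole conversion.
    define <8 x double> @widen_convert(<8 x i32> %v) {
      %d = sitofp <8 x i32> %v to <8 x double>
      ret <8 x double> %d
    }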
D | X86CallingConv.td
    68   CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    122  CCIfType<[v16f32, v8f64, v16i32, v8i64],
    149  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    312  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    332  CCIfType<[v16i32, v8i64, v16f32, v8f64],
    372  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
    415  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    490  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    507  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    526  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    [all …]
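These rules classify all six 512-bit vector types together: on AVX-512 targets they are passed and returned in ZMM registers, or, in some conventions, passed indirectly (the CCPassIndirect row at line 372). A sketch of a signature these rules govern (names and constant illustrative):

    ; With -mattr=+avx512f, %a is expected in zmm0 and the result is
    ; returned in zmm0 under the rules above; without 512-bit registers
    ; the value would be split or passed indirectly instead.
    define <8 x double> @scale(<8 x double> %a) {
      %r = fmul <8 x double> %a, <double 2.0, double 2.0, double 2.0, double 2.0,
                                  double 2.0, double 2.0, double 2.0, double 2.0>
      ret <8 x double> %r
    }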
D | X86InstrAVX512.td
    376  def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
    377  def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
    378  def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
    379  def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
    380  def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
    385  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
    389  def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
    395  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
    399  def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
    405  def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
    [all …]
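These patterns make every 512-bit bitconvert a register-class no-op: a bitcast between <8 x double> and any other 512-bit vector type stays in the same VR512 (zmm) register and emits no instruction. Sketch (name illustrative):

    ; Matches the Pat at line 389: both types live in VR512, so the
    ; bitcast selects to nothing and costs zero instructions.
    define <8 x i64> @to_bits(<8 x double> %v) {
      %b = bitcast <8 x double> %v to <8 x i64>
      ret <8 x i64> %b
    }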
D | X86InstrFragmentsSIMD.td
    629  def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
    638  def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;
    716  (v8f64 (alignedload512 node:$ptr))>;
    869  def bc_v8f64 : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>;
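The extloadv8f32 fragment matches a load of <8 x float> that is immediately widened to <8 x double>, so a load+fpext pair can fold into one converting load. A sketch of IR that produces that pattern (names illustrative):

    ; The load and fpext together form the extloadvf32 node that the
    ; extloadv8f32 fragment matches; on AVX-512 this can fold into a
    ; single vcvtps2pd with a memory operand.
    define <8 x double> @load_widen(<8 x float>* %p) {
      %f = load <8 x float>, <8 x float>* %p
      %d = fpext <8 x float> %f to <8 x double>
      ret <8 x double> %d
    }

(Contrast AMDGPUISelLowering.cpp at the bottom of this listing, which marks the analogous f16-to-f64 extending load Expand, keeping the load and extend as separate operations.)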
D | X86RegisterInfo.td
    465  def VR512 : RegisterClass<"X86", [v16f32, v8f64, v64i8, v32i16, v16i32, v8i64],
D | X86ISelLowering.cpp
    1312  addRegisterClass(MVT::v8f64, &X86::VR512RegClass); in X86TargetLowering()
    1344  setOperationAction(ISD::LOAD, MVT::v8f64, Legal); in X86TargetLowering()
    1357  setOperationAction(ISD::FADD, MVT::v8f64, Legal); in X86TargetLowering()
    1358  setOperationAction(ISD::FSUB, MVT::v8f64, Legal); in X86TargetLowering()
    1359  setOperationAction(ISD::FMUL, MVT::v8f64, Legal); in X86TargetLowering()
    1360  setOperationAction(ISD::FDIV, MVT::v8f64, Legal); in X86TargetLowering()
    1361  setOperationAction(ISD::FSQRT, MVT::v8f64, Legal); in X86TargetLowering()
    1362  setOperationAction(ISD::FNEG, MVT::v8f64, Custom); in X86TargetLowering()
    1363  setOperationAction(ISD::FABS, MVT::v8f64, Custom); in X86TargetLowering()
    1364  setOperationAction(ISD::FMA, MVT::v8f64, Legal); in X86TargetLowering()
    [all …]
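Marking an operation Legal tells the legalizer a single machine instruction covers the full 512-bit vector; Custom (FNEG, FABS here) routes through target-specific lowering, which on x86 rewrites them as sign-bit arithmetic. A sketch of IR that survives legalization whole under these settings (name illustrative):

    ; fmul and fadd on <8 x double> are Legal above, so each selects
    ; directly to one zmm instruction (vmulpd/vaddpd) instead of being
    ; split into 256-bit halves or scalarized.
    define <8 x double> @mul_add(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
      %m = fmul <8 x double> %a, %b
      %s = fadd <8 x double> %m, %c
      ret <8 x double> %s
    }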
/external/llvm/test/CodeGen/AMDGPU/
D | llvm.round.f64.ll
    58  %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
    68  declare <8 x double> @llvm.round.v8f64(<8 x double>) #1
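Unlike rint/nearbyint in the X86 tests above, llvm.round rounds halfway cases away from zero regardless of the current rounding mode. Minimal sketch (name illustrative):

    ; round(2.5) == 3.0 and round(-2.5) == -3.0 in every lane;
    ; rint/nearbyint would instead honor the current rounding mode
    ; (ties-to-even by default).
    define <8 x double> @round_example(<8 x double> %in) {
      %r = call <8 x double> @llvm.round.v8f64(<8 x double> %in)
      ret <8 x double> %r
    }
    declare <8 x double> @llvm.round.v8f64(<8 x double>)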
D | fminnum.f64.ll
    7   declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) #0
D | fmaxnum.f64.ll
    7   declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) #0
    48  %val = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) #0
D | fceil64.ll
    9   declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
    79  %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
D | ftrunc.f64.ll
    9   declare <8 x double> @llvm.trunc.v8f64(<8 x double>) nounwind readnone
    85  %y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
D | ffloor.f64.ll
    10   declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
    101  %y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
/external/llvm/test/Analysis/CostModel/X86/
D | masked-intrinsic-cost.ll
    83  declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
    86  declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
/external/llvm/test/Transforms/LoopVectorize/X86/
D | masked_load_store.ll
    200  ;AVX512: call <8 x double> @llvm.masked.load.v8f64
    203  ;AVX512: call void @llvm.masked.store.v8f64
    441  ;AVX512: call <8 x double> @llvm.masked.load.v8f64
    443  ;AVX512: call void @llvm.masked.store.v8f64
    512  ;AVX512: call void @llvm.masked.store.v8f64
    584  ;AVX512: call void @llvm.masked.store.v8f64
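The ;AVX512: lines are FileCheck directives: they assert that when 512-bit masked memory operations are available, the loop vectorizer emits llvm.masked.load/store.v8f64 for conditional accesses instead of scalarizing the guarded loop body. A hedged sketch of the kind of scalar loop this transformation applies to (all names and the threshold constant are illustrative, not copied from the test):

    ; Each iteration conditionally reads b[i] and writes a[i]. Vectorized
    ; by 8, the i1 compare becomes the <8 x i1> mask of the masked
    ; load/store intrinsics asserted by the CHECK lines above.
    define void @guarded_loop(double* %a, double* %b, i32* %trigger, i64 %n) {
    entry:
      br label %loop
    loop:
      %i = phi i64 [ 0, %entry ], [ %i.next, %latch ]
      %t.addr = getelementptr inbounds i32, i32* %trigger, i64 %i
      %t = load i32, i32* %t.addr
      %cmp = icmp slt i32 %t, 100
      br i1 %cmp, label %then, label %latch
    then:
      %b.addr = getelementptr inbounds double, double* %b, i64 %i
      %v = load double, double* %b.addr
      %v2 = fadd double %v, 1.0
      %a.addr = getelementptr inbounds double, double* %a, i64 %i
      store double %v2, double* %a.addr
      br label %latch
    latch:
      %i.next = add nuw nsw i64 %i, 1
      %done = icmp eq i64 %i.next, %n
      br i1 %done, label %exit, label %loop
    exit:
      ret void
    }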
/external/llvm/lib/IR/
D | ValueTypes.cpp
    190  case MVT::v8f64: return "v8f64"; in getEVTString()
    268  case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8); in getTypeForEVT()
/external/llvm/utils/TableGen/
D | CodeGenTarget.cpp
    123  case MVT::v8f64: return "MVT::v8f64"; in getEnumName()
/external/llvm/include/llvm/IR/
D | Intrinsics.td
    214  def llvm_v8f64_ty : LLVMType<v8f64>; // 8 x double
/external/llvm/lib/Target/AMDGPU/
D | AMDGPUISelLowering.cpp
    230  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand); in AMDGPUTargetLowering()