/external/llvm/test/CodeGen/X86/ |
D | avx512-round.ll |
    6  %res = call <16 x float> @llvm.floor.v16f32(<16 x float> %a)
    9  declare <16 x float> @llvm.floor.v16f32(<16 x float> %p)
    22  %res = call <16 x float> @llvm.ceil.v16f32(<16 x float> %a)
    25  declare <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
    38  %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %a)
    41  declare <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
    54  %res = call <16 x float> @llvm.rint.v16f32(<16 x float> %a)
    57  declare <16 x float> @llvm.rint.v16f32(<16 x float> %p)
    70  %res = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %a)
    73  declare <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
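For context, a minimal IR sketch of how one of the rounding intrinsics indexed above is invoked on a 512-bit <16 x float> value; the function name @round_example is illustrative, not taken from the test:

    ; Illustrative only: applies llvm.floor.v16f32 to a full 16-element vector.
    define <16 x float> @round_example(<16 x float> %a) {
      %res = call <16 x float> @llvm.floor.v16f32(<16 x float> %a)
      ret <16 x float> %res
    }
    declare <16 x float> @llvm.floor.v16f32(<16 x float>)

The ceil, trunc, rint, and nearbyint variants listed above follow the same unary pattern.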
|
D | wide-fma-contraction.ll |
    25  …%ret = tail call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> …
    29  declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>) nounwind readno…
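The fmuladd intrinsic takes three <16 x float> operands (a * b + c). A self-contained sketch, with an illustrative function name:

    ; Illustrative: the backend may contract this into FMA instructions.
    define <16 x float> @fma_example(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
      %ret = tail call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
      ret <16 x float> %ret
    }
    declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>) nounwind readnone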
|
D | masked_gather_scatter.ll |
    48  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    53  declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
    97  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> %i…
    544  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    575  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    603  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> un…
    665  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> un…
    1363  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    1747  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <…
    1979  …call void @llvm.masked.scatter.v16f32(<16 x float> %src0, <16 x float*> %ptrs, i32 4, <16 x i1> %m…
    [all …]
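Per the declaration at line 53 above, the gather operands are (pointer vector, alignment, mask, passthru). A minimal sketch with illustrative names:

    ; Illustrative: gathers 16 floats from 16 independent addresses under a mask.
    define <16 x float> @gather_example(<16 x float*> %ptrs, <16 x i1> %mask) {
      %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <16 x float> undef)
      ret <16 x float> %res
    }
    declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)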
|
D | avx512-arith.ll |
    558  declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
    564  %b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
    1021  %t = call <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
    1024  declare <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
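Both sqrt and fabs are unary over <16 x float>. A combined sketch (function name illustrative):

    ; Illustrative: square root followed by absolute value, element-wise.
    define <16 x float> @sqrt_fabs_example(<16 x float> %a) {
      %s = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
      %r = call <16 x float> @llvm.fabs.v16f32(<16 x float> %s)
      ret <16 x float> %r
    }
    declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
    declare <16 x float> @llvm.fabs.v16f32(<16 x float>)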
|
/external/llvm/test/Transforms/LoopVectorize/X86/ |
D | gather_scatter.ll |
    21  ;AVX512: llvm.masked.gather.v16f32
    22  ;AVX512: llvm.masked.store.v16f32
    99  ;AVX512: llvm.masked.gather.v16f32
    100  ;AVX512: llvm.masked.store.v16f32
    174  ;AVX512: llvm.masked.gather.v16f32
    177  ;AVX512: llvm.masked.scatter.v16f32
    236  declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
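The scatter intrinsic declared at line 236 is the store-side counterpart of the gather: operands are (value, pointer vector, alignment, mask) and it returns void. A minimal sketch with illustrative names:

    ; Illustrative: scatters 16 floats to 16 independent addresses under a mask.
    define void @scatter_example(<16 x float> %val, <16 x float*> %ptrs, <16 x i1> %mask) {
      call void @llvm.masked.scatter.v16f32(<16 x float> %val, <16 x float*> %ptrs, i32 4, <16 x i1> %mask)
      ret void
    }
    declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)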
|
D | masked_load_store.ll |
    187  ;AVX512: call <16 x float> @llvm.masked.load.v16f32.p0v16f32
    189  ;AVX512: call void @llvm.masked.store.v16f32.p0v16f32
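These are the consecutive-memory variants, taking a single base pointer rather than a pointer vector; the .p0v16f32 suffix encodes the pointer type, and the full signatures appear in masked-intrinsic-cost.ll further down. A sketch of a masked copy, names illustrative:

    ; Illustrative: masked load from %src, masked store to %dst.
    define void @masked_copy_example(<16 x float>* %src, <16 x float>* %dst, <16 x i1> %mask) {
      %v = call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %src, i32 4, <16 x i1> %mask, <16 x float> undef)
      call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %v, <16 x float>* %dst, i32 4, <16 x i1> %mask)
      ret void
    }
    declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
    declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)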
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
    114  v16f32 = 59, // 16 x f32 enumerator
    264  return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 || in is512BitVector()
    363  case v16f32: return f32; in getVectorElementType()
    394  case v16f32: return 16; in getVectorNumElements()
    501  case v16f32: in getSizeInBits()
    653  if (NumElements == 16) return MVT::v16f32; in getVectorVT()
|
D | ValueTypes.td | 88 def v16f32 : ValueType<512, 59>; // 16 x f32 vector value
|
/external/llvm/lib/Target/ARM/ |
D | ARMTargetTransformInfo.cpp |
    148  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 }, in getCastInstrCost()
    149  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 }, in getCastInstrCost()
    150  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 }, in getCastInstrCost()
    151  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 }, in getCastInstrCost()
    175  { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 }, in getCastInstrCost()
    176  { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 } in getCastInstrCost()
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | masked-intrinsic-cost.ll |
    131  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.v, i32 4, <16 x i1> <i1 tru…
    149  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.v, i32 4, <16 x i1> %mask, …
    167  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.v, i32 4, <16 x i1> %mask, …
    188  …%res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i…
    280  declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.v, i32, <16 x i1> %mask, <16 x f…
    290  declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
    291  declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
    68  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    122  CCIfType<[v16f32, v8f64, v16i32, v8i64],
    149  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    346  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    366  CCIfType<[v16i32, v8i64, v16f32, v8f64],
    406  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
    449  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    524  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    541  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    560  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    [all …]
|
D | X86TargetTransformInfo.cpp |
    555  { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, in getCastInstrCost()
    576  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, in getCastInstrCost()
    578  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, in getCastInstrCost()
    580  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, in getCastInstrCost()
    581  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, in getCastInstrCost()
    587  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, in getCastInstrCost()
    592  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, in getCastInstrCost()
    597  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, in getCastInstrCost()
    604  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, in getCastInstrCost()
    613  { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, in getCastInstrCost()
    [all …]
|
D | X86InstrAVX512.td |
    92  // The corresponding float type, e.g. v16f32 for v16i32
    382  def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
    383  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
    384  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
    385  def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
    386  def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
    387  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
    392  def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
    394  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
    402  def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
    [all …]
|
D | X86InstrFragmentsSIMD.td |
    622  def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
    701  (v16f32 (alignedload512 node:$ptr))>;
    859  def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | ftrunc.ll |
    10  declare <16 x float> @llvm.trunc.v16f32(<16 x float>) nounwind readnone
    117  %y = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) nounwind readnone
|
D | fceil.ll |
    10  declare <16 x float> @llvm.ceil.v16f32(<16 x float>) nounwind readnone
    129  %y = call <16 x float> @llvm.ceil.v16f32(<16 x float> %x) nounwind readnone
|
D | fminnum.ll |
    9  declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>) #0
    117  %val = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %b) #0
|
D | fmaxnum.ll |
    8  declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>) #0
    118  %val = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b) #0
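minnum and maxnum (listed in the two entries above) are binary intrinsics over matching vector types; combining them gives an element-wise clamp. A sketch, names illustrative:

    ; Illustrative: clamps each lane of %x into [%lo, %hi].
    define <16 x float> @clamp_example(<16 x float> %x, <16 x float> %lo, <16 x float> %hi) {
      %t = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %x, <16 x float> %lo)
      %r = call <16 x float> @llvm.minnum.v16f32(<16 x float> %t, <16 x float> %hi)
      ret <16 x float> %r
    }
    declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>)
    declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>)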
|
/external/llvm/lib/IR/ |
D | ValueTypes.cpp |
    191  case MVT::v16f32: return "v16f32"; in getEVTString()
    269  case MVT::v16f32: return VectorType::get(Type::getFloatTy(Context), 16); in getTypeForEVT()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64TargetTransformInfo.cpp |
    243  { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, in getCastInstrCost()
    244  { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, in getCastInstrCost()
|
/external/llvm/utils/TableGen/ |
D | CodeGenTarget.cpp | 119 case MVT::v16f32: return "MVT::v16f32"; in getEnumName()
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIRegisterInfo.td | 338 def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32, (add VGPR_512)> {
|
D | AMDGPUISelLowering.cpp |
    83  setOperationAction(ISD::LOAD, MVT::v16f32, Promote); in AMDGPUTargetLowering()
    84  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32); in AMDGPUTargetLowering()
    168  setOperationAction(ISD::STORE, MVT::v16f32, Promote); in AMDGPUTargetLowering()
    169  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32); in AMDGPUTargetLowering()
|
D | SIInstructions.td |
    2860  f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
    2863  f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
    2908  def : BitConvert <v16i32, v16f32, VReg_512>;
    2909  def : BitConvert <v16f32, v16i32, VReg_512>;
    3337  defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;
|
D | SIISelLowering.cpp |
    78  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); in SITargetLowering()
    139  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) { in SITargetLowering()
    181  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); in SITargetLowering()
|