/external/llvm/test/CodeGen/PowerPC/ |
D | vec_rounding.ll |
    107 declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    110 %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    126 declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    129 %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    145 declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    148 %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    164 declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
    167 %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
|
D | vec_sqrt.ll |
    11 declare <8 x float> @llvm.sqrt.v8f32(<8 x float> %val)
    40 %sqrt = call <8 x float> @llvm.sqrt.v8f32 (<8 x float> %x)
|
D | vec_fmuladd.ll |
    8 declare <8 x float> @llvm.fmuladd.v8f32(<8 x float> %val, <8 x float>, <8 x float>)
    31 %fmuladd = call <8 x float> @llvm.fmuladd.v8f32 (<8 x float> %x, <8 x float> %x, <8 x float> %x)
|
D | fminnum.ll |
    12 declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>)
    84 %z = call <8 x float> @llvm.minnum.v8f32(<8 x float> %x, <8 x float> %y) readnone
|
D | fmaxnum.ll |
    12 declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>)
    84 %z = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %x, <8 x float> %y) readnone
|
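The PowerPC test files above all follow the same shape: an IR-level declare of the <8 x float> intrinsic plus one call on a v8f32 value. A minimal, self-contained sketch of that shape, using the floor intrinsic as the example (the function name @floor_v8f32_example is illustrative, not a file in this tree):

; Sketch only: mirrors the declare/call pairs listed above.
define <8 x float> @floor_v8f32_example(<8 x float> %p) {
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  ret <8 x float> %t
}
declare <8 x float> @llvm.floor.v8f32(<8 x float>)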
/external/llvm/test/CodeGen/X86/ |
D | vec_floor.ll |
    35 %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    38 declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
    71 %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    74 declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
    107 %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    110 declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
    143 %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
    146 declare <8 x float> @llvm.rint.v8f32(<8 x float> %p)
    179 %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
    182 declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
|
D | fnabs.ll |
    54 %fabs = tail call <8 x float> @llvm.fabs.v8f32(< 8 x float> %a) #1
    65 %fabs = tail call <8 x float> @llvm.fabs.v8f32(<8 x float> %a) #1
    72 declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
|
D | vec_fabs.ll |
    35 %t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
    38 declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
|
D | sqrt-fastmath.ll |
    10 declare <8 x float> @llvm.sqrt.v8f32(<8 x float>) #0
    134 %sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    446 {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps in getShuffleCost()
    540 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, in getCastInstrCost()
    547 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, in getCastInstrCost()
    551 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, in getCastInstrCost()
    553 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, in getCastInstrCost()
    586 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, in getCastInstrCost()
    592 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, in getCastInstrCost()
    593 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, in getCastInstrCost()
    606 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, in getCastInstrCost()
    635 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, in getCastInstrCost()
    [all …]
|
D | X86InstrFMA.td |
    94 loadv8f32, X86Fmadd, v4f32, v8f32>;
    96 loadv8f32, X86Fmsub, v4f32, v8f32>;
    99 v4f32, v8f32>;
    102 v4f32, v8f32>;
    121 loadv8f32, X86Fnmadd, v4f32, v8f32>;
    123 loadv8f32, X86Fnmsub, v4f32, v8f32>;
    397 defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
    399 defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
    401 defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
    403 defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
    [all …]
|
D | X86CallingConv.td |
    62 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    118 CCIfType<[v8f32, v4f64, v8i32, v4i64],
    145 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    306 CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    328 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    369 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,
    411 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    486 CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    502 CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    521 CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    [all …]
|
D | X86InstrSSE.td |
    342 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
    343 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
    365 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
    375 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
    423 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
    428 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
    429 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
    430 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
    431 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
    432 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
    [all …]
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
    113 v8f32 = 58, // 8 x f32 enumerator
    250 return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 || in is256BitVector()
    355 case v8f32: in getVectorElementType()
    394 case v8f32: in getVectorNumElements()
    487 case v8f32: in getSizeInBits()
    645 if (NumElements == 8) return MVT::v8f32; in getVectorVT()
|
/external/clang/test/CodeGen/ |
D | x86_64-arguments.c |
    194 typedef float v8f32 __attribute__((__vector_size__(32))); typedef
    196 v8f32 v;
    205 v8f32 v[1];
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.round.ll |
    56 %result = call <8 x float> @llvm.round.v8f32(<8 x float> %in) #1
    64 declare <8 x float> @llvm.round.v8f32(<8 x float>) #1
|
D | ftrunc.ll |
    9 declare <8 x float> @llvm.trunc.v8f32(<8 x float>) nounwind readnone
    78 %y = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) nounwind readnone
|
D | fceil.ll |
    9 declare <8 x float> @llvm.ceil.v8f32(<8 x float>) nounwind readnone
    86 %y = call <8 x float> @llvm.ceil.v8f32(<8 x float> %x) nounwind readnone
|
/external/llvm/lib/Target/ARM/ |
D | ARMTargetTransformInfo.cpp |
    118 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
    119 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
    120 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, in getCastInstrCost()
    121 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, in getCastInstrCost()
    147 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 }, in getCastInstrCost()
    148 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 }, in getCastInstrCost()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64TargetTransformInfo.cpp |
    237 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, in getCastInstrCost()
    238 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
    239 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, in getCastInstrCost()
    240 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-fmuladd.ll |
    44 …%tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tm…
    85 declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
|
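The call on line 44 of arm64-fmuladd.ll above is truncated in this listing; its full three-operand shape matches the declare on line 85 of the same entry. A short sketch of such a call (the operand names %a, %b, %c and the wrapping function are placeholders, not from the tree):

; Sketch only: fused multiply-add on <8 x float>, per the declare above.
define <8 x float> @fmuladd_v8f32_example(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
  %r = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
  ret <8 x float> %r
}
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)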
/external/llvm/test/Analysis/CostModel/X86/ |
D | masked-intrinsic-cost.ll |
    32 …%res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1>%mask, <8 x flo…
    80 declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
|
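The masked-intrinsic-cost.ll call above is likewise cut off; unlike the rounding intrinsics, this one takes a pointer, an alignment, a mask and a pass-through value. A sketch of a complete call in that form (the pass-through name %dst and the wrapping function are assumptions, since the listed line is truncated before the last operand):

; Sketch only: masked load of <8 x float> at alignment 4, as in the listed call.
define <8 x float> @masked_load_v8f32_example(<8 x float>* %addr, <8 x i1> %mask, <8 x float> %dst) {
  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1> %mask, <8 x float> %dst)
  ret <8 x float> %res
}
declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)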
/external/llvm/lib/IR/ |
D | ValueTypes.cpp |
    185 case MVT::v8f32: return "v8f32"; in getEVTString()
    263 case MVT::v8f32: return VectorType::get(Type::getFloatTy(Context), 8); in getTypeForEVT()
|
/external/llvm/test/CodeGen/ARM/ |
D | fusedMAC.ll |
    215 …%call = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) noun…
    224 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIRegisterInfo.td |
    211 def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 32, (add SGPR_256)> {
    239 def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 32, (add VGPR_256)> {
|