/external/llvm/test/CodeGen/ARM/

vfloatintrinsics.ll:
    131:  %v4f32 = type <4 x float>
    133:  define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
    135:  %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
    136:  ret %v4f32 %1
    139:  define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
    141:  %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
    142:  ret %v4f32 %1
    145:  define %v4f32 @test_v4f32.sin(%v4f32 %a) {
    147:  %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
    148:  ret %v4f32 %1
    [all …]

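To make the truncated fragments above easier to read, here is a minimal sketch of the test pattern these files follow, reassembled from the sqrt hits; the declare line is an assumption (the standard @llvm.sqrt.v4f32 signature), since the hit list does not show it:

    %v4f32 = type <4 x float>

    ; assumed standard intrinsic declaration
    declare %v4f32 @llvm.sqrt.v4f32(%v4f32)

    define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
      %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
      ret %v4f32 %1
    }
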
2011-11-29-128bitArithmetics.ll:
    20:   %1 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
    25:   declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readonly
    52:   %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %0)
    57:   declare <4 x float> @llvm.cos.v4f32(<4 x float>) nounwind readonly
    83:   %1 = call <4 x float> @llvm.exp.v4f32(<4 x float> %0)
    88:   declare <4 x float> @llvm.exp.v4f32(<4 x float>) nounwind readonly
    114:  %1 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %0)
    119:  declare <4 x float> @llvm.exp2.v4f32(<4 x float>) nounwind readonly
    145:  %1 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
    150:  declare <4 x float> @llvm.log10.v4f32(<4 x float>) nounwind readonly
    [all …]

spill-q.ll:
    10:   declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21:   …%0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind ; <<4 x float>> [#use…
    23:   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25:   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26:   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28:   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30:   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32:   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34:   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36:   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]

vrec.ll:
    31:   %tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
    39:   declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
    55:   %tmp3 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
    60:   declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
    90:   %tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
    98:   declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
    114:  %tmp3 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
    119:  declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone

2012-01-23-PostRA-LICM.ll:
    32:   %tmp16 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp11) nounwind
    33:   …%tmp17 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp16, <4 x float> %tmp11) noun…
    35:   …%tmp19 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp18, <4 x float> %tmp11) noun…
    38:   …%tmp22 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp21, <4 x float> undef) nounwi…
    51:   …%tmp34 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> undef, <4 x float> %tmp28) nounw…
    73:   …%tmp57 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp56, <4 x float> %tmp55) nounw…
    97:   declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
    99:   declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
    101:  declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone

2009-11-01-NeonMoves.ll:
    23:   …%8 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %7) nounwind ; <<4 x float>> [#uses…
    25:   …%10 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %9, <4 x float> %7) nounwind ; <<4…
    38:   declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
    40:   declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone

2012-01-24-RegSequenceLiveRange.ll:
    55:   tail call void @llvm.arm.neon.vst1.v4f32(i8* undef, <4 x float> %0, i32 4) nounwind
    56:   tail call void @llvm.arm.neon.vst1.v4f32(i8* undef, <4 x float> %2, i32 4) nounwind
    66:   tail call void @llvm.arm.neon.vst2.v4f32(i8* %p, <4 x float> undef, <4 x float> undef, i32 4)
    71:   declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
    72:   declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind

fabs-neon.ll:
    6:    %foo = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
    9:    declare <4 x float> @llvm.fabs.v4f32(<4 x float> %a)

coalesce-subregs.ll:
    17:   %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
    21:   tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> undef, i32 4)
    30:   %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
    34:   %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
    36:   tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> %vld2215, i32 4)
    45:   %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
    55:   %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
    58:   …tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %qq0.0.1.0, <4 x float> %vld2215, i32…
    67:   declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly
    68:   declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
    [all …]

neon-fma.ll:
    17:   …%call = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) noun…
    22:   declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone

vcvt.ll:
    108:  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %tmp1, i32 1)
    116:  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %tmp1, i32 1)
    124:  %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
    132:  %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
    136:  declare <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32) nounwind readnone
    137:  declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32) nounwind readnone
    138:  declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
    139:  declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone

2013-02-27-expand-vfma.ll:
    13:   %tmp = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %c, <4 x float> %a) #2
    17:   declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1

2010-05-21-BuildVector.ll:
    39:   tail call void @llvm.arm.neon.vst1.v4f32(i8* %20, <4 x float> %19, i32 1)
    43:   declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind

/external/llvm/test/CodeGen/Thumb2/

thumb2-spill-q.ll:
    10:   declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    21:   %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind
    23:   …%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    25:   …%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=…
    26:   %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    28:   %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    30:   %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    32:   %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    34:   %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    36:   %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
    [all …]

machine-licm.ll:
    62:   %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %gep1, i32 1)
    63:   …%tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> <float 1.000000e+00, float 1.00000…
    65:   call void @llvm.arm.neon.vst1.v4f32(i8* %gep2, <4 x float> %tmp3, i32 1)
    76:   declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
    78:   declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
    80:   declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone

/external/llvm/lib/Target/ARM/

ARMTargetTransformInfo.cpp:
    185:  { ISD::FP_EXTEND, MVT::v4f32, 4 }                       (in getCastInstrCost())
    223:  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },          (in getCastInstrCost())
    224:  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },          (in getCastInstrCost())
    232:  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },           (in getCastInstrCost())
    233:  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },           (in getCastInstrCost())
    234:  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },           (in getCastInstrCost())
    235:  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },           (in getCastInstrCost())
    236:  { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },          (in getCastInstrCost())
    237:  { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },          (in getCastInstrCost())
    247:  { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },          (in getCastInstrCost())
    [all …]

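As a rough illustration of what these cost-table rows describe (not code from the file): the SINT_TO_FP v4i32→v4f32 row with cost 1 corresponds to IR like the sketch below, which NEON can lower with a single vcvt. The function name is invented for the example.

    define <4 x float> @cast_cost_example(<4 x i32> %a) {
      ; a whole-vector int-to-float conversion; the ARM cost table above
      ; rates this at 1 because NEON has a direct q-register vcvt for it
      %r = sitofp <4 x i32> %a to <4 x float>
      ret <4 x float> %r
    }
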
ARMCallingConv.td:
    28:   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    47:   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    61:   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    73:   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    89:   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    140:  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    150:  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    168:  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    180:  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,

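These CCIfType rules bit-convert every 128-bit vector type, v4f32 included, to v2f64 so one set of register-assignment rules covers them all. A hypothetical function that would take this path (name and body invented for illustration; assuming the hard-float AAPCS-VFP variant, where NEON Q registers carry the argument and return value):

    ; the <4 x float> argument and result are treated as a single 128-bit
    ; value by the calling convention rules listed above
    define <4 x float> @pass_v4f32(<4 x float> %a, <4 x float> %b) {
      %r = fadd <4 x float> %a, %b
      ret <4 x float> %r
    }
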
/external/llvm/test/CodeGen/X86/

vec_floor.ll:
    17:   %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    20:   declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    53:   %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    56:   declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    89:   %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    92:   declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    125:  %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
    128:  declare <4 x float> @llvm.rint.v4f32(<4 x float> %p)
    161:  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
    164:  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)

vec_fabs.ll:
    17:   %t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
    20:   declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)

/external/llvm/test/CodeGen/PowerPC/

vec_rounding.ll:
    98:   declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    101:  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
    117:  declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    120:  %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
    136:  declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    139:  %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
    155:  declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
    158:  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)

/external/llvm/test/Analysis/CostModel/X86/

intrinsic-cost.ll:
    15:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
    25:   …estimated cost of 400 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    28:   …n estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.…
    32:   declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone

/external/llvm/lib/Target/PowerPC/

PPCInstrAltivec.td:
    551:  def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
    552:  def VCMPBFPo  : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
    553:  def VCMPEQFP  : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
    554:  def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
    555:  def VCMPGEFP  : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
    556:  def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
    557:  def VCMPGTFP  : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
    558:  def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;
    632:  def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;
    636:  def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;
    [all …]

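The bitconvert patterns near line 632 simply re-tag the 128-bit Altivec register with a new element type; no instruction is emitted. A hypothetical IR fragment such a pattern would match (function name invented for illustration):

    define <16 x i8> @bitcast_v4f32(<4 x float> %x) {
      ; a no-op reinterpretation of the same 128-bit vector register,
      ; covered by the Pat<(v16i8 (bitconvert (v4f32 ...)))> rule above
      %r = bitcast <4 x float> %x to <16 x i8>
      ret <16 x i8> %r
    }
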
/external/clang/test/CodeGen/

x86_64-arguments.c:
    155:  typedef float v4f32 __attribute__((__vector_size__(16)));
    156:  v4f32 f25(v4f32 X) {                                    (in f25())
    179:  v4f32 v;

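Under the x86-64 SysV ABI a 16-byte vector such as this v4f32 typedef is classified as SSE, so f25 receives and returns its value in an XMM register. A rough sketch of the IR clang produces for it, simplified for illustration (the real test output carries additional attributes and local temporaries):

    ; the C-level v4f32 becomes the IR vector type <4 x float>
    define <4 x float> @f25(<4 x float> %X) {
      ret <4 x float> %X
    }
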
/external/llvm/lib/Target/X86/

X86InstrFMA.td:
    78:   memopv8f32, X86Fmadd, v4f32, v8f32>;
    80:   memopv8f32, X86Fmsub, v4f32, v8f32>;
    83:   v4f32, v8f32>;
    86:   v4f32, v8f32>;
    105:  memopv8f32, X86Fnmadd, v4f32, v8f32>;
    107:  memopv8f32, X86Fnmsub, v4f32, v8f32>;
    340:  defm VFMADDPS4  : fma4p<0x68, "vfmaddps",  X86Fmadd,  v4f32, v8f32,
    342:  defm VFMSUBPS4  : fma4p<0x6C, "vfmsubps",  X86Fmsub,  v4f32, v8f32,
    344:  defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
    346:  defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
    [all …]

X86InstrSSE.td:
    244:  def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
    245:            (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
    253:  def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
    254:            (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
    275:  def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
    284:  def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
    301:  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
    306:  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
    311:  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
    316:  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
    [all …]

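The first pattern above (lines 244–245) says that extracting lane 0 of a v4f32 needs no shuffle, only a register-class copy, because the scalar already sits in the low lane of the XMM register. A hypothetical IR function it would cover (name invented for illustration):

    define float @extract_low(<4 x float> %x) {
      ; lane 0 of the vector is already the low 32 bits of the XMM register,
      ; so this lowers to a plain VR128 -> FR32 copy per the Pat above
      %r = extractelement <4 x float> %x, i32 0
      ret float %r
    }
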