| /external/llvm/test/CodeGen/PowerPC/ |
| D | 2007-03-30-SpillerCrash.ll |
3 define void @test(<4 x float>*, { { i16, i16, i32 } }*) {
5 …%.sub7896 = getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#u…
6 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ;…
7 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ;…
8 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ;…
9 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ;…
10 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ;…
11 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ;…
12 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ;…
13 …getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ;…
[all …]
|
| D | p9-xxinsertw-xxextractuw.ll |
6 define <4 x float> @_Z7testInsILj0ELj0EDv4_fET1_S1_S1_(<4 x float> %a, <4 x float> %b) {
14 %vecins = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
15 ret <4 x float> %vecins
18 define <4 x float> @_Z7testInsILj0ELj1EDv4_fET1_S1_S1_(<4 x float> %a, <4 x float> %b) {
26 %vecins = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 5, i32 1, i32 2, i32 3>
27 ret <4 x float> %vecins
30 define <4 x float> @_Z7testInsILj0ELj2EDv4_fET1_S1_S1_(<4 x float> %a, <4 x float> %b) {
38 %vecins = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 1, i32 2, i32 3>
39 ret <4 x float> %vecins
42 define <4 x float> @_Z7testInsILj0ELj3EDv4_fET1_S1_S1_(<4 x float> %a, <4 x float> %b) {
[all …]
|
| D | ppc64-fastcc.ll |
7 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
15 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
19 ; CHECK: mr 3, 4
23 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
31 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
39 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
47 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
55 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
63 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
71 …4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, dou…
[all …]
|
| /external/llvm/test/CodeGen/X86/ |
| D | 2008-07-19-movups-spills.ll |
7 @0 = external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
8 @1 = external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
9 @2 = external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
10 @3 = external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
11 @4 = external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
12 @5 = external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
13 @6 = external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
14 @7 = external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
15 @8 = external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
16 @9 = external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
|
| D | 2007-04-24-VectorCrash.ll |
5 declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>)
9 …4 x i32> zeroinitializer, and (<4 x i32> bitcast (<4 x float> shufflevector (<4 x float> undef, <4…
10 bitcast <4 x i32> %0 to <4 x float> ; <<4 x float>>:1 [#uses=1]
11 fsub <4 x float> %1, zeroinitializer ; <<4 x float>>:2 [#uses=1]
12 …fsub <4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer), …
13 …shufflevector <4 x float> zeroinitializer, <4 x float> %3, <4 x i32> < i32 0, i32 5, i32 6, i32 7 …
14 …shufflevector <4 x float> zeroinitializer, <4 x float> %4, <4 x i32> < i32 0, i32 5, i32 6, i32 7 …
15 …shufflevector <4 x float> zeroinitializer, <4 x float> %5, <4 x i32> < i32 0, i32 1, i32 2, i32 7 …
16 …shufflevector <4 x float> %6, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 …
17 …shufflevector <4 x float> %7, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 …
[all …]
|
| D | 2006-05-01-SchedCausingSpills.ll |
7 define i32 @foo(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
8 %tmp44 = load <4 x float>, <4 x float>* %a ; <<4 x float>> [#uses=9]
9 %tmp46 = load <4 x float>, <4 x float>* %b ; <<4 x float>> [#uses=1]
10 %tmp48 = load <4 x float>, <4 x float>* %c ; <<4 x float>> [#uses=1]
11 %tmp50 = load <4 x float>, <4 x float>* %d ; <<4 x float>> [#uses=1]
12 %tmp51 = bitcast <4 x float> %tmp44 to <4 x i32> ; <<4 x i32>> [#uses=1]
13 …%tmp = shufflevector <4 x i32> %tmp51, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > …
14 %tmp52 = bitcast <4 x i32> %tmp to <4 x float> ; <<4 x float>> [#uses=1]
15 …%tmp60 = xor <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 …
16 %tmp61 = bitcast <4 x i32> %tmp60 to <4 x float> ; <<4 x float>> [#uses=1]
[all …]
|
| D | sha.ll |
4 declare <4 x i32> @llvm.x86.sha1rnds4(<4 x i32>, <4 x i32>, i8) nounwind readnone
6 define <4 x i32> @test_sha1rnds4rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable {
8 %0 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a, <4 x i32> %b, i8 3)
9 ret <4 x i32> %0
14 define <4 x i32> @test_sha1rnds4rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
16 %0 = load <4 x i32>, <4 x i32>* %b
17 %1 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a, <4 x i32> %0, i8 3)
18 ret <4 x i32> %1
23 declare <4 x i32> @llvm.x86.sha1nexte(<4 x i32>, <4 x i32>) nounwind readnone
25 define <4 x i32> @test_sha1nexterr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable {
[all …]
|
| D | pr24139.ll |
10 define <2 x double> @PR24139(<2 x double> %arg, <2 x double> %arg1, <2 x double> %arg2) {
11 %tmp = bitcast <2 x double> %arg to <4 x float>
12 …%tmp3 = fmul <4 x float> %tmp, <float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F…
13 %tmp4 = bitcast <2 x double> %arg to <4 x i32>
14 %tmp5 = and <4 x i32> %tmp4, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
15 %tmp6 = or <4 x i32> %tmp5, <i32 1056964608, i32 1056964608, i32 1056964608, i32 1056964608>
16 %tmp7 = bitcast <4 x i32> %tmp6 to <4 x float>
17 %tmp8 = fadd <4 x float> %tmp3, %tmp7
18 %tmp9 = tail call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp8) #2
19 %tmp10 = bitcast <4 x i32> %tmp9 to <2 x i64>
[all …]
|
| D | avx512vl-vec-cmp.ll |
4 define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
10 %mask = icmp eq <4 x i64> %x, %y
11 %max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
12 ret <4 x i64> %max
15 define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
21 %mask = icmp sgt <4 x i64> %x, %y
22 %max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
23 ret <4 x i64> %max
26 define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind {
32 %mask = icmp sge <8 x i32> %x, %y
[all …]
|
| /external/llvm/test/Transforms/InstCombine/ |
| D | fold-vector-select.ll |
5 define void @foo(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D,
6 <4 x i32> *%E, <4 x i32> *%F, <4 x i32> *%G, <4 x i32> *%H,
7 <4 x i32> *%I, <4 x i32> *%J, <4 x i32> *%K, <4 x i32> *%L,
8 <4 x i32> *%M, <4 x i32> *%N, <4 x i32> *%O, <4 x i32> *%P,
9 <4 x i32> *%Q, <4 x i32> *%R, <4 x i32> *%S, <4 x i32> *%T,
10 <4 x i32> *%U, <4 x i32> *%V, <4 x i32> *%W, <4 x i32> *%X,
11 <4 x i32> *%Y, <4 x i32> *%Z, <4 x i32> *%BA, <4 x i32> *%BB,
12 <4 x i32> *%BC, <4 x i32> *%BD, <4 x i32> *%BE, <4 x i32> *%BF,
13 <4 x i32> *%BG, <4 x i32> *%BH, <4 x i32> *%BI, <4 x i32> *%BJ,
14 <4 x i32> *%BK, <4 x i32> *%BL, <4 x i32> *%BM, <4 x i32> *%BN,
[all …]
|
| D | x86-vperm2.ll |
5 define <4 x double> @perm2pd_non_const_imm(<4 x double> %a0, <4 x double> %a1, i8 %b) {
6 …%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 %b…
7 ret <4 x double> %res
10 ; CHECK-NEXT: call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1…
11 ; CHECK-NEXT: ret <4 x double>
15 ; In the following 4 tests, both zero mask bits of the immediate are set.
17 define <4 x double> @perm2pd_0x88(<4 x double> %a0, <4 x double> %a1) {
18 …%res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 13…
19 ret <4 x double> %res
22 ; CHECK-NEXT: ret <4 x double> zeroinitializer
[all …]
|
| D | x86-insertps.ll |
3 declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
7 define <4 x float> @insertps_non_const_imm(<4 x float> %v1, <4 x float> %v2, i8 %c) {
8 %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 %c)
9 ret <4 x float> %res
12 ; CHECK-NEXT: call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 %c)
13 ; CHECK-NEXT: ret <4 x float>
18 define <4 x float> @insertps_0x0f(<4 x float> %v1, <4 x float> %v2) {
19 %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 15)
20 ret <4 x float> %res
23 ; CHECK-NEXT: ret <4 x float> zeroinitializer
[all …]
|
| D | vec_shuffle.ll |
3 define <4 x float> @test1(<4 x float> %v1) {
5 ; CHECK: ret <4 x float> %v1
6 %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
7 ret <4 x float> %v2
10 define <4 x float> @test2(<4 x float> %v1) {
12 ; CHECK: ret <4 x float> %v1
13 %v2 = shufflevector <4 x float> %v1, <4 x float> %v1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
14 ret <4 x float> %v2
17 define float @test3(<4 x float> %A, <4 x float> %B, float %f) {
20 %C = insertelement <4 x float> %A, float %f, i32 0
[all …]
|
| D | x86-sse.ll |
7 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
8 ; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[TMP1]])
9 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
12 %1 = insertelement <4 x float> undef, float %a, i32 0
13 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
14 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
15 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
16 %5 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %4)
17 %6 = extractelement <4 x float> %5, i32 0
25 %1 = insertelement <4 x float> undef, float %a, i32 0
[all …]
|
| D | vector-mul.ll |
6 define <4 x i8> @Zero_i8(<4 x i8> %InVec) {
8 %mul = mul <4 x i8> %InVec, <i8 0, i8 0, i8 0, i8 0>
9 ret <4 x i8> %mul
13 ; CHECK: ret <4 x i8> zeroinitializer
15 define <4 x i8> @Identity_i8(<4 x i8> %InVec) {
17 %mul = mul <4 x i8> %InVec, <i8 1, i8 1, i8 1, i8 1>
18 ret <4 x i8> %mul
22 ; CHECK: ret <4 x i8> %InVec
24 define <4 x i8> @AddToSelf_i8(<4 x i8> %InVec) {
26 %mul = mul <4 x i8> %InVec, <i8 2, i8 2, i8 2, i8 2>
[all …]
|
| /external/webrtc/data/voice_engine/stereo_rtp_files/ |
| D | stereo_g729.rtp |
|
| D | stereo_g729_jitter.rtp |
|
| /external/llvm/test/CodeGen/AArch64/ |
| D | arm64-vext_reverse.ll |
6 define <4 x i16> @vext_6701_0(<4 x i16> %a1, <4 x i16> %a2) {
9 ; CHECK: ext v0.8b, v1.8b, v0.8b, #4
10 %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
11 ret <4 x i16> %x
14 define <4 x i16> @vext_6701_12(<4 x i16> %a1, <4 x i16> %a2) {
17 ; CHECK: ext v0.8b, v0.8b, v0.8b, #4
18 %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
19 ret <4 x i16> %x
22 define <4 x i16> @vext_6701_13(<4 x i16> %a1, <4 x i16> %a2) {
25 ; CHECK: ext v0.8b, v1.8b, v0.8b, #4
[all …]
|
| D | fp16-vector-load-store.ll |
4 define <4 x half> @load_64(<4 x half>* nocapture readonly %a) #0 {
8 %0 = load <4 x half>, <4 x half>* %a, align 8
9 ret <4 x half> %0
13 define <8 x half> @load_128(<8 x half>* nocapture readonly %a) #0 {
17 %0 = load <8 x half>, <8 x half>* %a, align 16
18 ret <8 x half> %0
22 define <4 x half> @load_dup_64(half* nocapture readonly %a) #0 {
24 ; CHECK: ld1r { v0.4h }, [x0]
27 %1 = insertelement <4 x half> undef, half %0, i32 0
28 %2 = shufflevector <4 x half> %1, <4 x half> undef, <4 x i32> zeroinitializer
[all …]
|
| D | arm64-vmul.ll |
4 define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
7 %tmp1 = load <8 x i8>, <8 x i8>* %A
8 %tmp2 = load <8 x i8>, <8 x i8>* %B
9 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
10 ret <8 x i16> %tmp3
13 define <4 x i32> @smull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
15 ;CHECK: smull.4s
16 %tmp1 = load <4 x i16>, <4 x i16>* %A
17 %tmp2 = load <4 x i16>, <4 x i16>* %B
18 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
[all …]
|
| /external/llvm/test/CodeGen/AMDGPU/ |
| D | pv.ll |
4 ; CHECK: MAX T{{[0-9].[XYZW]}}, 0.0, PV.X
6 …4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3,…
8 %0 = extractelement <4 x float> %reg1, i32 0
9 %1 = extractelement <4 x float> %reg1, i32 1
10 %2 = extractelement <4 x float> %reg1, i32 2
11 %3 = extractelement <4 x float> %reg1, i32 3
12 %4 = extractelement <4 x float> %reg2, i32 0
13 %5 = extractelement <4 x float> %reg2, i32 1
14 %6 = extractelement <4 x float> %reg2, i32 2
15 %7 = extractelement <4 x float> %reg2, i32 3
[all …]
|
| D | r600-export-fix.ll |
13 define amdgpu_vs void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) {
15 %0 = extractelement <4 x float> %reg1, i32 0
16 %1 = extractelement <4 x float> %reg1, i32 1
17 %2 = extractelement <4 x float> %reg1, i32 2
18 %3 = extractelement <4 x float> %reg1, i32 3
19 …%4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
20 %5 = extractelement <4 x float> %4, i32 0
22 …%7 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
23 %8 = extractelement <4 x float> %7, i32 1
25 …%10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 …
[all …]
|
| D | fetch-limits.r700+.ll |
21 %0 = load <4 x float>, <4 x float> addrspace(8)* null
22 …%1 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
23 …%2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
24 …%3 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
25 …%4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
26 …%5 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
27 …%6 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
28 …%7 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
29 …%8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
30 …%9 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x…
[all …]
|
| /external/llvm/test/CodeGen/SystemZ/ |
| D | vec-cmp-03.ll |
6 define <4 x i32> @f1(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
10 %cmp = icmp eq <4 x i32> %val1, %val2
11 %ret = sext <4 x i1> %cmp to <4 x i32>
12 ret <4 x i32> %ret
16 define <4 x i32> @f2(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
21 %cmp = icmp ne <4 x i32> %val1, %val2
22 %ret = sext <4 x i1> %cmp to <4 x i32>
23 ret <4 x i32> %ret
27 define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
31 %cmp = icmp sgt <4 x i32> %val1, %val2
[all …]
|
| /external/llvm/test/CodeGen/ARM/ |
| D | vqdmul.ll |
5 define <4 x i16> @vqdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
8 %tmp1 = load <4 x i16>, <4 x i16>* %A
9 %tmp2 = load <4 x i16>, <4 x i16>* %B
10 %tmp3 = call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
11 ret <4 x i16> %tmp3
14 define <2 x i32> @vqdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
17 %tmp1 = load <2 x i32>, <2 x i32>* %A
18 %tmp2 = load <2 x i32>, <2 x i32>* %B
19 %tmp3 = call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
20 ret <2 x i32> %tmp3
[all …]
|