/external/llvm/test/CodeGen/ARM/ |
D | vshift.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = shl <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
    43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    44  ret <8 x i8> %tmp2
    [all …]
|
D | vbits.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = and <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = and <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = and <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = and <1 x i64> %tmp1, %tmp2
    43  %tmp2 = load <16 x i8>* %B
    44  %tmp3 = and <16 x i8> %tmp1, %tmp2
    [all …]
|
D | vshl.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]
|
D | vsub.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = sub <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = sub <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = sub <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = sub <1 x i64> %tmp1, %tmp2
    43  %tmp2 = load <2 x float>* %B
    44  %tmp3 = fsub <2 x float> %tmp1, %tmp2
    [all …]
|
D | vadd.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = add <8 x i8> %tmp1, %tmp2
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = add <4 x i16> %tmp1, %tmp2
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = add <2 x i32> %tmp1, %tmp2
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = add <1 x i64> %tmp1, %tmp2
    43  %tmp2 = load <2 x float>* %B
    44  %tmp3 = fadd <2 x float> %tmp1, %tmp2
    [all …]
|
D | vneg.ll |
    7   %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
    8   ret <8 x i8> %tmp2
    15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
    16  ret <4 x i16> %tmp2
    23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
    24  ret <2 x i32> %tmp2
    31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
    32  ret <2 x float> %tmp2
    39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    40  ret <16 x i8> %tmp2
    [all …]
|
D | vcvt.ll |
    7   %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
    8   ret <2 x i32> %tmp2
    15  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
    16  ret <2 x i32> %tmp2
    23  %tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
    24  ret <2 x float> %tmp2
    31  %tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
    32  ret <2 x float> %tmp2
    39  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
    40  ret <4 x i32> %tmp2
    [all …]
|
D | vrev.ll |
    7   …%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3…
    8   ret <8 x i8> %tmp2
    15  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
    16  ret <4 x i16> %tmp2
    23  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
    24  ret <2 x i32> %tmp2
    31  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
    32  ret <2 x float> %tmp2
    39  …%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i3…
    40  ret <16 x i8> %tmp2
    [all …]
|
D | vqshl.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <1 x i64>* %B
    35  %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
    43  %tmp2 = load <8 x i8>* %B
    44  %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    [all …]
|
D | vdup.ll |
    7   %tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
    8   %tmp3 = insertelement <8 x i8> %tmp2, i8 %A, i32 2
    21  %tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
    22  %tmp3 = insertelement <4 x i16> %tmp2, i16 %A, i32 2
    31  %tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
    32  ret <2 x i32> %tmp2
    39  %tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
    40  ret <2 x float> %tmp2
    47  %tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32 1
    48  %tmp3 = insertelement <16 x i8> %tmp2, i8 %A, i32 2
    [all …]
|
D | vldlane.ll |
    9   %tmp2 = load i8* %A, align 8
    10  %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 3
    19  %tmp2 = load i16* %A, align 8
    20  %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 2
    29  %tmp2 = load i32* %A, align 8
    30  %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    39  %tmp2 = load i32* %A, align 4
    40  %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
    48  %tmp2 = load float* %A, align 4
    49  %tmp3 = insertelement <2 x float> %tmp1, float %tmp2, i32 1
    [all …]
|
D | vcnt.ll |
    7   %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
    8   ret <8 x i8> %tmp2
    15  %tmp2 = call <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8> %tmp1)
    16  ret <16 x i8> %tmp2
    26  %tmp2 = call <8 x i8> @llvm.arm.neon.vclz.v8i8(<8 x i8> %tmp1)
    27  ret <8 x i8> %tmp2
    34  %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
    35  ret <4 x i16> %tmp2
    42  %tmp2 = call <2 x i32> @llvm.arm.neon.vclz.v2i32(<2 x i32> %tmp1)
    43  ret <2 x i32> %tmp2
    [all …]
|
D | vabs.ll |
    7   %tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
    8   ret <8 x i8> %tmp2
    15  %tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
    16  ret <4 x i16> %tmp2
    23  %tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
    24  ret <2 x i32> %tmp2
    31  %tmp2 = call <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float> %tmp1)
    32  ret <2 x float> %tmp2
    39  %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
    40  ret <16 x i8> %tmp2
    [all …]
|
D | vget_lane.ll |
    9   %tmp2 = extractelement <8 x i8> %tmp1, i32 1
    10  %tmp3 = sext i8 %tmp2 to i32
    18  %tmp2 = extractelement <4 x i16> %tmp1, i32 1
    19  %tmp3 = sext i16 %tmp2 to i32
    27  %tmp2 = extractelement <8 x i8> %tmp1, i32 1
    28  %tmp3 = zext i8 %tmp2 to i32
    36  %tmp2 = extractelement <4 x i16> %tmp1, i32 1
    37  %tmp3 = zext i16 %tmp2 to i32
    46  %tmp2 = add <2 x i32> %tmp1, %tmp1
    47  %tmp3 = extractelement <2 x i32> %tmp2, i32 1
    [all …]
|
D | vtbl.ll |
    11  %tmp2 = load <8 x i8>* %B
    12  %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
    20  %tmp2 = load %struct.__neon_int8x8x2_t* %B
    21  %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
    22  %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
    31  %tmp2 = load %struct.__neon_int8x8x3_t* %B
    32  %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
    33  %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
    34  %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
    43  %tmp2 = load %struct.__neon_int8x8x4_t* %B
    [all …]
|
D | vtrn.ll |
    8   %tmp2 = load <8 x i8>* %B
    9   …%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 …
    10  …%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 …
    20  %tmp2 = load <4 x i16>* %B
    21  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
    22  %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
    32  %tmp2 = load <2 x i32>* %B
    33  %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
    34  %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
    44  %tmp2 = load <2 x float>* %B
    [all …]
|
D | vpadd.ll |
    7   %tmp2 = load <8 x i8>* %B
    8   %tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    16  %tmp2 = load <4 x i16>* %B
    17  %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    25  %tmp2 = load <2 x i32>* %B
    26  %tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    34  %tmp2 = load <2 x float>* %B
    35  %tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
    49  %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
    50  ret <4 x i16> %tmp2
    [all …]
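
The .ll files listed above are LLVM CodeGen regression tests for ARM NEON: each function loads one or two small vectors, applies a single vector operation (shl, and, sub, add, shufflevector, or an @llvm.arm.neon.* intrinsic) and returns the result. At the C level the same operations are normally reached through the arm_neon.h intrinsics. The following minimal sketch only illustrates that correspondence; it is not taken from the test suite, the function and its name are mine, and it assumes an ARM toolchain with NEON enabled.

    /* Element-wise shift and add on 8 x i8 vectors, roughly what
     * vshl.ll / vshift.ll and vadd.ll exercise at the IR level. */
    #include <arm_neon.h>

    int8x8_t shift_then_add(int8x8_t a, int8x8_t b)
    {
        int8x8_t shifted = vshl_s8(a, b);   /* per-element shift left by b */
        return vadd_s8(shifted, a);         /* per-element add             */
    }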
|
/external/openssl/crypto/bf/asm/ |
D | bf-586.pl |
    16  $tmp2="ebx";
    35  &mov($tmp2,&wparam(0));
    41  &mov($L,&DWP(0,$tmp2,"",0));
    42  &mov($R,&DWP(4,$tmp2,"",0));
    50  &mov($tmp2,&DWP(0,$P,"",0));
    53  &xor($L,$tmp2);
    58  &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
    62  &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
    69  &mov($tmp2,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
    72  &xor($L,$tmp2);
    [all …]
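
bf-586.pl is OpenSSL's x86 perlasm generator for Blowfish; $tmp2 (ebx) carries P-array words and S-box lookups through the BF_ENCRYPT rounds. As a reference for what those rounds compute, here is a hedged C sketch of standard Blowfish block encryption, written from the published algorithm rather than transcribed from the perlasm; P, S and BF_ROUNDS are the conventional names and are assumed here.

    #include <stdint.h>

    #define BF_ROUNDS 16
    extern uint32_t P[BF_ROUNDS + 2];   /* subkeys  */
    extern uint32_t S[4][256];          /* S-boxes  */

    static uint32_t F(uint32_t x)
    {
        return ((S[0][x >> 24] + S[1][(x >> 16) & 0xff])
                ^ S[2][(x >> 8) & 0xff]) + S[3][x & 0xff];
    }

    static void bf_encrypt_block(uint32_t *pL, uint32_t *pR)
    {
        uint32_t L = *pL, R = *pR;
        L ^= P[0];                          /* cf. the initial xor($L, $tmp2) above      */
        for (int i = 1; i < BF_ROUNDS; i += 2) {
            R ^= F(L) ^ P[i];
            L ^= F(R) ^ P[i + 1];
        }
        R ^= P[BF_ROUNDS + 1];              /* cf. the final P[BF_ROUNDS+1] load above   */
        *pL = R;                            /* halves come out swapped                   */
        *pR = L;
    }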
|
/external/openssl/crypto/sha/asm/ |
D | sha512-sparcv9.pl |
    110  $tmp2="%g5";
    176  sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
    181  or $tmp1,$tmp2,$tmp2
    182  or @pair[1],$tmp2,$tmp2
    184  add $h,$tmp2,$T1
    185  $ST $tmp2,[%sp+`$bias+$frame+$i*$SZ`]
    193  sllx @pair[1],$tmp31,$tmp2 ! Xload($i)
    199  or $tmp1,$tmp2,$tmp2
    201  or @pair[1],$tmp2,$tmp2
    203  add $h,$tmp2,$T1
    [all …]
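
In sha512-sparcv9.pl the sllx/or sequences tagged "Xload($i)" assemble each 64-bit big-endian message word from narrower loads, with extra shift logic for unaligned input. A portable C sketch of the same idea, ignoring the unaligned-pointer handling (helper name is mine):

    #include <stdint.h>

    /* Load one 64-bit big-endian word; the shift-and-or mirrors the
     * sllx/or pattern in the Xload sequences above. */
    static uint64_t load_be64(const unsigned char *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }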
|
/external/qemu/target-arm/ |
D | translate.c |
    225  TCGv tmp2 = tcg_temp_new_i32();        in gen_smul_dual() local
    227  tcg_gen_ext16s_i32(tmp2, b);           in gen_smul_dual()
    228  tcg_gen_mul_i32(tmp1, tmp1, tmp2);     in gen_smul_dual()
    229  tcg_temp_free_i32(tmp2);               in gen_smul_dual()
    323  TCGv_i64 tmp2 = tcg_temp_new_i64();    in gen_mulu_i64_i32() local
    327  tcg_gen_extu_i32_i64(tmp2, b);         in gen_mulu_i64_i32()
    329  tcg_gen_mul_i64(tmp1, tmp1, tmp2);     in gen_mulu_i64_i32()
    330  tcg_temp_free_i64(tmp2);               in gen_mulu_i64_i32()
    337  TCGv_i64 tmp2 = tcg_temp_new_i64();    in gen_muls_i64_i32() local
    341  tcg_gen_ext_i32_i64(tmp2, b);          in gen_muls_i64_i32()
    [all …]
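
translate.c builds these multiplies with QEMU's TCG frontend API: allocate a temporary, extend the 32-bit operands, multiply, free the temporary. Below is a hedged sketch of that pattern, assembled only from the calls visible above; the real gen_mulu_i64_i32() may differ in detail, and the code assumes QEMU's tcg/tcg-op.h declarations for these functions and types.

    /* 32x32 -> 64-bit unsigned multiply expressed as TCG ops (sketch). */
    static TCGv_i64 gen_mulu_i64_i32_sketch(TCGv a, TCGv b)
    {
        TCGv_i64 tmp1 = tcg_temp_new_i64();
        TCGv_i64 tmp2 = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(tmp1, a);      /* zero-extend both operands */
        tcg_gen_extu_i32_i64(tmp2, b);
        tcg_gen_mul_i64(tmp1, tmp1, tmp2);  /* full 64-bit product       */
        tcg_temp_free_i64(tmp2);
        return tmp1;
    }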
|
/external/llvm/test/Transforms/ObjCARC/ |
D | retain-block.ll |
    21  %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
    22  tail call void @use_pointer(i8* %tmp2)
    23  tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
    31  ; CHECK: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind
    32  ; CHECK: tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
    36  %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind
    37  tail call void @use_pointer(i8* %tmp2)
    38  tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
    46  ; CHECK: %tmp2 = tail call i8* @objc_retainBlock(i8* %tmp) nounwind, !clang.arc.copy_on_escape !0
    47  ; CHECK: tail call void @objc_release(i8* %tmp2) nounwind, !clang.imprecise_release !0
    [all …]
|
/external/aac/libFDK/include/arm/ |
D | cplx_mul.h |
    110  LONG tmp1,tmp2;               in cplxMultDiv2() local
    118  : "=&r"(tmp1), "=&r"(tmp2)    in cplxMultDiv2()
    123  *c_Im = tmp2;                 in cplxMultDiv2()
    135  LONG tmp1, tmp2;              in cplxMultDiv2() local
    142  : "=&r"(tmp1), "=&r"(tmp2)    in cplxMultDiv2()
    147  *c_Im = tmp2;                 in cplxMultDiv2()
    159  LONG tmp1, tmp2;              in cplxMultAddDiv2() local
    166  : "=&r"(tmp1), "=&r"(tmp2)    in cplxMultAddDiv2()
    171  *c_Im += tmp2;                in cplxMultAddDiv2()
    184  LONG tmp1, tmp2;              in cplxMultDiv2() local
    [all …]
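
cplx_mul.h provides ARM inline-assembly variants of the FDK fixed-point complex multiplies; tmp1/tmp2 receive the real and imaginary results from the asm block. As a hedged, portable sketch of what cplxMultDiv2() computes, assuming Q1.31 operands so that a fractional multiply halved is (a*b) >> 32 (helper names below are mine, not from the header):

    #include <stdint.h>

    static int32_t fmul_div2(int32_t a, int32_t b)   /* (a*b)/2 in Q1.31 */
    {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    /* c = (a * b) / 2 for complex fixed-point values. */
    static void cplxMultDiv2_sketch(int32_t *c_Re, int32_t *c_Im,
                                    int32_t a_Re, int32_t a_Im,
                                    int32_t b_Re, int32_t b_Im)
    {
        *c_Re = fmul_div2(a_Re, b_Re) - fmul_div2(a_Im, b_Im);
        *c_Im = fmul_div2(a_Re, b_Im) + fmul_div2(a_Im, b_Re);
    }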
|
/external/valgrind/main/none/tests/s390x/ |
D | mul.h |
    6   unsigned long tmp2 = m1; \
    12  : "+d" (tmp1), "+d" (tmp2) \
    15  printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, m2, tmp1, tmp2); \
    21  unsigned long tmp2 = m1; \
    27  : "+d" (tmp1), "+d" (tmp2) \
    30  printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, m2, tmp1, tmp2); \
    36  unsigned long tmp2 = m1; \
    42  : "+d" (tmp1), "+d" (tmp2) \
    44  printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, (unsigned long) m2, tmp1, tmp2); \
    50  unsigned long tmp2 = m1; \
    [all …]
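
mul.h defines test macros that run one s390x multiply instruction via inline assembly on the tmp1:tmp2 register pair and print the 128-bit product. A portable illustration of what the printf reports for the unsigned widening variants, using __int128 instead of the actual instruction; "mlgr" below is only an example mnemonic, since the macro takes the instruction as its #insn argument, and the helper name is mine:

    #include <stdio.h>

    static void show_widening_mul(unsigned long m1, unsigned long m2)
    {
        unsigned __int128 prod = (unsigned __int128)m1 * m2;
        unsigned long hi = (unsigned long)(prod >> 64);   /* tmp1 in the macro */
        unsigned long lo = (unsigned long)prod;           /* tmp2 in the macro */
        printf("mlgr %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, m2, hi, lo);
    }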
|
/external/dropbear/libtommath/ |
D | bn_mp_toom_mul.c |
    27   mp_int w0, w1, w2, w3, w4, tmp1, tmp2, a0, a1, a2, b0, b1, b2;   in mp_toom_mul() local
    33   &b2, &tmp1, &tmp2, NULL)) != MP_OKAY) {                          in mp_toom_mul()
    96   if ((res = mp_mul_2(&b0, &tmp2)) != MP_OKAY) {                   in mp_toom_mul()
    99   if ((res = mp_add(&tmp2, &b1, &tmp2)) != MP_OKAY) {              in mp_toom_mul()
    102  if ((res = mp_mul_2(&tmp2, &tmp2)) != MP_OKAY) {                 in mp_toom_mul()
    105  if ((res = mp_add(&tmp2, &b2, &tmp2)) != MP_OKAY) {              in mp_toom_mul()
    109  if ((res = mp_mul(&tmp1, &tmp2, &w1)) != MP_OKAY) {              in mp_toom_mul()
    127  if ((res = mp_mul_2(&b2, &tmp2)) != MP_OKAY) {                   in mp_toom_mul()
    130  if ((res = mp_add(&tmp2, &b1, &tmp2)) != MP_OKAY) {              in mp_toom_mul()
    133  if ((res = mp_mul_2(&tmp2, &tmp2)) != MP_OKAY) {                 in mp_toom_mul()
    [all …]
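
bn_mp_toom_mul.c is LibTomMath's Toom-3 multiplication; tmp1 and tmp2 hold the evaluations of the three-way-split operands at the interpolation points, built by shift-and-add. The block above computes tmp2 = ((2*b0 + b1)*2 + b2) = 4*b0 + 2*b1 + b2, i.e. 4·b(1/2) for b(x) = b0 + b1*x + b2*x^2, and then multiplies it into w1. A hedged sketch of just that step with the error handling folded into a helper; the function name is mine, only the mp_* calls come from the excerpt:

    #include <tommath.h>

    /* tmp = 4*b0 + 2*b1 + b2, built with doublings and additions. */
    static int eval_half_times4(mp_int *b0, mp_int *b1, mp_int *b2, mp_int *tmp)
    {
        int res;
        if ((res = mp_mul_2(b0, tmp)) != MP_OKAY)    return res;  /* tmp = 2*b0            */
        if ((res = mp_add(tmp, b1, tmp)) != MP_OKAY) return res;  /* tmp = 2*b0 + b1       */
        if ((res = mp_mul_2(tmp, tmp)) != MP_OKAY)   return res;  /* tmp = 4*b0 + 2*b1     */
        if ((res = mp_add(tmp, b2, tmp)) != MP_OKAY) return res;  /* tmp = 4*b0 + 2*b1 + b2 */
        return MP_OKAY;
    }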
|
/external/openssl/crypto/ripemd/asm/ |
D | rmd-586.pl |
    20   $tmp2="edx";
    86   &mov($tmp2, &Xv($pos));
    88   &add($a, $tmp2);
    99   &mov($tmp2, &Xv($pos));
    106  &add($a, $tmp2);
    108  &mov($tmp2, &Xv($pos2)) if $o == 1;
    109  &mov($tmp2, &wparam(0)) if $o == 2;
    126  &add($a, $tmp2);
    127  &mov($tmp2, $c);
    129  &and($tmp2, $b);
    [all …]
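
rmd-586.pl is OpenSSL's x86 perlasm generator for RIPEMD-160; $tmp2 (edx) shuttles message words X[pos] and partial boolean-function results through the round macros. For orientation, a hedged C sketch of the standard RIPEMD-160 round structure those macros implement, written from the published algorithm with the XOR boolean function of the first round group (the perlasm selects different functions and constants per group; helper names are mine):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int s)
    {
        return (x << s) | (x >> (32 - s));
    }

    /* One round: a = rol(a + f(b,c,d) + X + K, s) + e; c = rol(c, 10). */
    static void rmd160_round(uint32_t *a, uint32_t b, uint32_t *c, uint32_t d,
                             uint32_t e, uint32_t x, uint32_t k, int s)
    {
        *a += (b ^ *c ^ d) + x + k;   /* message word added, cf. add($a, $tmp2) above */
        *a = rol32(*a, s) + e;
        *c = rol32(*c, 10);
    }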
|