/external/webrtc/src/common_audio/signal_processing/
  resample_by_2_internal.c  (all matches in WebRtcSpl_DownBy2IntToShort())
     34   WebRtc_Word32 tmp0, tmp1, diff;     (local)
     46   tmp1 = state[0] + diff * kResampleAllpass[1][0];
     48   diff = tmp1 - state[2];
     54   state[1] = tmp1;
     76   tmp1 = state[4] + diff * kResampleAllpass[0][0];
     78   diff = tmp1 - state[6];
     84   state[5] = tmp1;
    104   tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
    110   if (tmp1 > (WebRtc_Word32)0x00007FFF)
    111   tmp1 = 0x00007FFF;
    [all …]

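Note: the matched lines are the per-sample state updates of a chain of first-order allpass sections (a standard half-band decimator) followed by int16 saturation. A minimal C sketch of the idea, assuming Q15 coefficients and hypothetical names (allpass_q15, sat_to_s16); the real WebRTC code differs in scaling and API:

    #include <stdint.h>

    /* One first-order allpass section, y[n] = x[n-1] + c*(x[n] - y[n-1]),
     * with the coefficient c in Q15. state[0] holds x[n-1], state[1] y[n-1]. */
    static int32_t allpass_q15(int32_t x, int32_t c_q15, int32_t state[2])
    {
        int32_t diff = x - state[1];
        int32_t y = state[0] + (int32_t)(((int64_t)diff * c_q15) >> 15);
        state[0] = x;
        state[1] = y;
        return y;
    }

    /* Saturate to int16; lines 110-111 above are the positive clamp. */
    static int16_t sat_to_s16(int32_t v)
    {
        if (v > 0x00007FFF)
            return 0x7FFF;
        if (v < -0x00008000)
            return -0x8000;
        return (int16_t)v;
    }
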
/external/dropbear/libtommath/
  bn_mp_toom_sqr.c  (all matches in mp_toom_sqr())
     22   mp_int w0, w1, w2, w3, w4, tmp1, a0, a1, a2;     (local)
     26   if ((res = mp_init_multi(&w0, &w1, &w2, &w3, &w4, &a0, &a1, &a2, &tmp1, NULL)) != MP_OKAY) {
     60   if ((res = mp_mul_2(&a0, &tmp1)) != MP_OKAY) {
     63   if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) {
     66   if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) {
     69   if ((res = mp_add(&tmp1, &a2, &tmp1)) != MP_OKAY) {
     73   if ((res = mp_sqr(&tmp1, &w1)) != MP_OKAY) {
     78   if ((res = mp_mul_2(&a2, &tmp1)) != MP_OKAY) {
     81   if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) {
     84   if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) {
    [all …]

  bn_mp_toom_mul.c  (all matches in mp_toom_mul())
     27   mp_int w0, w1, w2, w3, w4, tmp1, tmp2, a0, a1, a2, b0, b1, b2;     (local)
     33   &b2, &tmp1, &tmp2, NULL)) != MP_OKAY) {
     83   if ((res = mp_mul_2(&a0, &tmp1)) != MP_OKAY) {
     86   if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) {
     89   if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) {
     92   if ((res = mp_add(&tmp1, &a2, &tmp1)) != MP_OKAY) {
    109   if ((res = mp_mul(&tmp1, &tmp2, &w1)) != MP_OKAY) {
    114   if ((res = mp_mul_2(&a2, &tmp1)) != MP_OKAY) {
    117   if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) {
    120   if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) {
    [all …]

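The repeated mp_mul_2/mp_add runs in both files are Toom-3's evaluation phase: with the operand split as a = a2·B² + a1·B + a0, lines 60-73 of bn_mp_toom_sqr.c build 4·a0 + 2·a1 + a2 and square it into w1 (and symmetrically 4·a2 + 2·a1 + a0 into w3). A sketch of that step using the same libtommath calls, wrapped in a hypothetical helper (toom_sqr_w1 is not a real libtommath function):

    #include <tommath.h>

    /* w1 = (4*a0 + 2*a1 + a2)^2, one of the five Toom-3 evaluation points:
     * it equals 16*f(1/2)^2 for f(x) = a2*x^2 + a1*x + a0. Sketch only. */
    static int toom_sqr_w1(mp_int *a0, mp_int *a1, mp_int *a2,
                           mp_int *tmp1, mp_int *w1)
    {
        int res;
        if ((res = mp_mul_2(a0, tmp1)) != MP_OKAY)     return res; /* tmp1 = 2*a0        */
        if ((res = mp_add(tmp1, a1, tmp1)) != MP_OKAY) return res; /* tmp1 = 2*a0 + a1   */
        if ((res = mp_mul_2(tmp1, tmp1)) != MP_OKAY)   return res; /* tmp1 = 4*a0 + 2*a1 */
        if ((res = mp_add(tmp1, a2, tmp1)) != MP_OKAY) return res; /* tmp1 += a2         */
        return mp_sqr(tmp1, w1);                                   /* w1 = tmp1^2        */
    }
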
/external/llvm/test/CodeGen/ARM/
  vst4.ll
      7   %tmp1 = load <8 x i8>* %B
      8   …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
     17   %tmp1 = load <8 x i8>* %B
     18   …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
     29   %tmp1 = load <4 x i16>* %B
     30   … @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16>…
     39   %tmp1 = load <2 x i32>* %B
     40   … @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32>…
     48   %tmp1 = load <2 x float>* %B
     49   …m.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x flo…
    [all …]

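At C level the same operation is reachable through the arm_neon.h vst4 intrinsics; a rough equivalent of the v8i8 case above, as a sketch (the .ll test drives the @llvm.arm.neon.vst4.* intrinsics directly, and the trailing i32 argument there is the alignment):

    #include <arm_neon.h>

    /* Store one 8x8-bit vector four times, interleaved, with VST4.8. */
    void store4_dup_u8(uint8_t *A, const uint8_t *B)
    {
        uint8x8_t v = vld1_u8(B);
        uint8x8x4_t q = { { v, v, v, v } };
        vst4_u8(A, q);
    }
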
  vstlane.ll
      7   %tmp1 = load <8 x i8>* %B
      8   %tmp2 = extractelement <8 x i8> %tmp1, i32 3
     18   %tmp1 = load <8 x i8>* %B
     19   %tmp2 = extractelement <8 x i8> %tmp1, i32 3
     30   %tmp1 = load <4 x i16>* %B
     31   %tmp2 = extractelement <4 x i16> %tmp1, i32 2
     40   %tmp1 = load <2 x i32>* %B
     41   %tmp2 = extractelement <2 x i32> %tmp1, i32 1
     49   %tmp1 = load <2 x float>* %B
     50   %tmp2 = extractelement <2 x float> %tmp1, i32 1
    [all …]

  vshift.ll
      6   %tmp1 = load <8 x i8>* %A
      8   %tmp3 = shl <8 x i8> %tmp1, %tmp2
     15   %tmp1 = load <4 x i16>* %A
     17   %tmp3 = shl <4 x i16> %tmp1, %tmp2
     24   %tmp1 = load <2 x i32>* %A
     26   %tmp3 = shl <2 x i32> %tmp1, %tmp2
     33   %tmp1 = load <1 x i64>* %A
     35   %tmp3 = shl <1 x i64> %tmp1, %tmp2
     42   %tmp1 = load <8 x i8>* %A
     43   %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    [all …]

  vbits.ll
      6   %tmp1 = load <8 x i8>* %A
      8   %tmp3 = and <8 x i8> %tmp1, %tmp2
     15   %tmp1 = load <4 x i16>* %A
     17   %tmp3 = and <4 x i16> %tmp1, %tmp2
     24   %tmp1 = load <2 x i32>* %A
     26   %tmp3 = and <2 x i32> %tmp1, %tmp2
     33   %tmp1 = load <1 x i64>* %A
     35   %tmp3 = and <1 x i64> %tmp1, %tmp2
     42   %tmp1 = load <16 x i8>* %A
     44   %tmp3 = and <16 x i8> %tmp1, %tmp2
    [all …]

  vst3.ll
      8   %tmp1 = load <8 x i8>* %B
      9   call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
     17   %tmp1 = load <4 x i16>* %B
     18   …call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, …
     26   %tmp1 = load <2 x i32>* %B
     27   …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
     37   %tmp1 = load <2 x i32>* %B
     38   …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
     48   %tmp1 = load <2 x float>* %B
     49   …void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1,…
    [all …]

  uxtb.ll
      5   %tmp1 = and i32 %x, 16711935          ; <i32> [#uses=1]
      6   ret i32 %tmp1
     10   %tmp1 = lshr i32 %x, 8                ; <i32> [#uses=1]
     11   %tmp2 = and i32 %tmp1, 16711935       ; <i32> [#uses=1]
     16   %tmp1 = lshr i32 %x, 8                ; <i32> [#uses=1]
     17   %tmp2 = and i32 %tmp1, 16711935       ; <i32> [#uses=1]
     22   %tmp1 = lshr i32 %x, 8                ; <i32> [#uses=1]
     23   %tmp6 = and i32 %tmp1, 16711935       ; <i32> [#uses=1]
     28   %tmp1 = lshr i32 %x, 8                ; <i32> [#uses=1]
     29   %tmp2 = and i32 %tmp1, 16711935       ; <i32> [#uses=1]
    [all …]

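The constant 16711935 is 0x00FF00FF, so these functions mask out alternate bytes, optionally after a shift; the test expects each to fold into a single UXTB16 (with rotation where needed). Roughly the C-level patterns being matched, as a sketch:

    #include <stdint.h>

    /* Expected to select "uxtb16 r0, r0". */
    uint32_t even_bytes(uint32_t x) { return x & 0x00FF00FF; }

    /* Expected to select "uxtb16 r0, r0, ror #8". */
    uint32_t odd_bytes(uint32_t x) { return (x >> 8) & 0x00FF00FF; }
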
  vneg.ll
      6   %tmp1 = load <8 x i8>* %A
      7   %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
     14   %tmp1 = load <4 x i16>* %A
     15   %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
     22   %tmp1 = load <2 x i32>* %A
     23   %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
     30   %tmp1 = load <2 x float>* %A
     31   %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
     38   %tmp1 = load <16 x i8>* %A
     39   %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    [all …]

  vst2.ll
      7   %tmp1 = load <8 x i8>* %B
      8   call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
     17   %tmp1 = load <8 x i8>* %B
     18   call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
     29   %tmp1 = load <4 x i16>* %B
     30   call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
     38   %tmp1 = load <2 x i32>* %B
     39   call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
     47   %tmp1 = load <2 x float>* %B
     48   call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
    [all …]

/external/chromium_org/third_party/openssl/openssl/crypto/sha/asm/
  sha512-sparcv9.pl
    109   $tmp1="%g4";
    141   srlx @X[$j+1],$tmp32,$tmp1
    143   or $tmp1,@X[$j],@X[$j]
    178   sllx @pair[0],$tmp0,$tmp1
    181   or $tmp1,$tmp2,$tmp2
    196   sllx @pair[0],$tmp0,$tmp1
    199   or $tmp1,$tmp2,$tmp2
    224   $SLL $e,`$SZ*8-@Sigma1[2]`,$tmp1
    227   xor $tmp1,$h,$h
    228   $SLL $e,`$SZ*8-@Sigma1[1]`,$tmp1
    [all …]

  sha1-sparcv9.pl
     49   $tmp1="%i4";
     59   srl $a,27,$tmp1
     62   add $tmp1,$e,$e
     64   andn $d,$b,$tmp1
     66   or $tmp1,$tmp0,$tmp1
     75   add $tmp1,$e,$e
     87   srl $a,27,$tmp1
     93   srlx @X[($j+7)%8],32,$tmp1
     96   or $tmp1,$Xi,$Xi
    103   srl $a,27,$tmp1 !!
    [all …]

  sha1-sparcv9a.pl
     50   $tmp1="%i4";
    154   srl $a,27,$tmp1
    158   add $tmp1,$e,$e
    159   andn $d,$b,$tmp1
    163   or $tmp1,$tmp3,$tmp1
    165   add $tmp1,$e,$e
    173   srl $a,27,$tmp1
    177   add $tmp1,$e,$e
    179   andn $d,$b,$tmp1
    183   or $tmp1,$tmp3,$tmp1
    [all …]

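Pattern behind the matches in both sha1-sparcv9*.pl files: the srl a,27 paired with a left shift builds rol(a,5), and andn d,b or'd with (b and c) builds the choice function of SHA-1's first twenty rounds. In C, one such round looks roughly like this sketch (K and the schedule word x come from the surrounding round logic):

    #include <stdint.h>

    static uint32_t rol(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

    /* One SHA-1 round of the first stage: Ch(b,c,d) = (b & c) | (d & ~b),
     * which is what the andn/or pair in the listings computes. */
    static void sha1_round_ch(uint32_t *a, uint32_t *b, uint32_t *c,
                              uint32_t *d, uint32_t *e, uint32_t x)
    {
        const uint32_t K = 0x5a827999;          /* rounds 0..19 */
        *e += rol(*a, 5) + ((*b & *c) | (*d & ~*b)) + x + K;
        *b = rol(*b, 30);
    }
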
/external/openssl/crypto/sha/asm/
  sha512-sparcv9.pl  (vendored copy; matches identical to the chromium_org entry above)
  sha1-sparcv9.pl  (vendored copy; matches identical to the chromium_org entry above)
  sha1-sparcv9a.pl  (vendored copy; matches identical to the chromium_org entry above)

/external/llvm/test/CodeGen/AArch64/
  neon-bitwise-instructions.ll
      6   %tmp1 = and <8 x i8> %a, %b;
      7   ret <8 x i8> %tmp1
     12   %tmp1 = and <16 x i8> %a, %b;
     13   ret <16 x i8> %tmp1
     19   %tmp1 = or <8 x i8> %a, %b;
     20   ret <8 x i8> %tmp1
     25   %tmp1 = or <16 x i8> %a, %b;
     26   ret <16 x i8> %tmp1
     32   %tmp1 = xor <8 x i8> %a, %b;
     33   ret <8 x i8> %tmp1
    [all …]

/external/chromium_org/third_party/openssl/openssl/crypto/md5/asm/
  md5-586.pl
     20   $tmp1="edi";
     47   &mov($tmp1,$C) if $pos < 0;
     53   &xor($tmp1,$d);                     # F function - part 2
     55   &and($tmp1,$b);                     # F function - part 3
     58   &xor($tmp1,$d);                     # F function - part 4
     60   &add($a,$tmp1);
     61   &mov($tmp1,&Np($c)) if $pos < 1;    # next tmp1 for R0
     62   &mov($tmp1,&Np($c)) if $pos == 1;   # next tmp1 for R1
     79   &xor($tmp1,$b);                     # G function - part 2
     80   &and($tmp1,$d);                     # G function - part 3
    [all …]

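The four numbered "F function" parts in the listing compute MD5's F(b,c,d) = (b & c) | (~b & d) without a NOT instruction, using the select identity ((c ^ d) & b) ^ d; the G parts do the same for G(b,c,d) = (b & d) | (c & ~d) with d as the selector. The identity in C, as a sketch:

    #include <stdint.h>

    uint32_t md5_F_direct(uint32_t b, uint32_t c, uint32_t d)
    {
        return (b & c) | (~b & d);
    }

    uint32_t md5_F_as_coded(uint32_t b, uint32_t c, uint32_t d)
    {
        uint32_t tmp1 = c;  /* &mov($tmp1,$C)             */
        tmp1 ^= d;          /* F function - part 2        */
        tmp1 &= b;          /* F function - part 3        */
        tmp1 ^= d;          /* F function - part 4        */
        return tmp1;        /* equals md5_F_direct(b,c,d) */
    }
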
/external/openssl/crypto/md5/asm/
  md5-586.pl  (vendored copy; matches identical to the chromium_org entry above)

/external/llvm/test/CodeGen/PowerPC/
  bswap-load-store.ll
      8   %tmp1 = getelementptr i8* %ptr, i32 %off            ; <i8*> [#uses=1]
      9   %tmp1.upgrd.1 = bitcast i8* %tmp1 to i32*           ; <i32*> [#uses=1]
     11   store i32 %tmp13, i32* %tmp1.upgrd.1
     16   %tmp1 = getelementptr i8* %ptr, i32 %off            ; <i8*> [#uses=1]
     17   %tmp1.upgrd.2 = bitcast i8* %tmp1 to i32*           ; <i32*> [#uses=1]
     18   %tmp = load i32* %tmp1.upgrd.2                      ; <i32> [#uses=1]
     24   %tmp1 = getelementptr i8* %ptr, i32 %off            ; <i8*> [#uses=1]
     25   %tmp1.upgrd.3 = bitcast i8* %tmp1 to i16*           ; <i16*> [#uses=1]
     27   store i16 %tmp5, i16* %tmp1.upgrd.3
     32   %tmp1 = getelementptr i8* %ptr, i32 %off            ; <i8*> [#uses=1]
    [all …]

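These tests pin the PowerPC lowering of a byte-swapped store or load at an i8* + offset address to the byte-reversed memory instructions (stwbrx/lwbrx for i32, sthbrx/lhbrx for i16). The C shape that produces such IR, as a sketch (__builtin_bswap32 is a GCC/Clang builtin):

    #include <stdint.h>
    #include <string.h>

    /* A byte-swapped 32-bit store through ptr+off; the IR above is the
     * getelementptr + bitcast + store form of this. */
    void store_swapped32(uint8_t *ptr, int32_t off, uint32_t val)
    {
        uint32_t swapped = __builtin_bswap32(val);
        memcpy(ptr + off, &swapped, sizeof swapped);
    }
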
/external/chromium_org/third_party/openssl/openssl/crypto/bf/asm/
  bf-586.pl
     15   $tmp1="eax";
     44   &xor( $tmp1, $tmp1);
     58   &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
     62   &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
     77   &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
     80   &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
     87   &mov(&DWP(4,$tmp1,"",0),$L);
     89   &mov(&DWP(0,$tmp1,"",0),$R);
     95   local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_;
    105   &movb( &LB($tmp1), &HB($tmp2));     # A
    [all …]

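The BF_ENCRYPT calls above unroll Blowfish's sixteen Feistel rounds, and the movb(&LB(...), &HB(...)) lines split a word into the four S-box indices. One round in plain C, as a sketch (S and P as in the standard Blowfish key schedule):

    #include <stdint.h>

    static uint32_t bf_F(const uint32_t S[4][256], uint32_t x)
    {
        uint8_t a = (uint8_t)(x >> 24), b = (uint8_t)(x >> 16);
        uint8_t c = (uint8_t)(x >> 8),  d = (uint8_t)x;
        return ((S[0][a] + S[1][b]) ^ S[2][c]) + S[3][d];
    }

    /* One Feistel round; encryption walks P[0..15], decryption P[17..2]. */
    static void bf_round(const uint32_t S[4][256], const uint32_t P[18],
                         uint32_t *L, uint32_t *R, int i)
    {
        *L ^= P[i];
        *R ^= bf_F(S, *L);
    }
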
/external/openssl/crypto/bf/asm/
  bf-586.pl  (vendored copy; matches identical to the chromium_org entry above)

/external/aac/libFDK/include/arm/
  cplx_mul.h
    110   LONG tmp1,tmp2;                     (in cplxMultDiv2(), local)
    118   : "=&r"(tmp1), "=&r"(tmp2)
    122   *c_Re = tmp1;
    135   LONG tmp1, tmp2;                    (in cplxMultDiv2(), local)
    142   : "=&r"(tmp1), "=&r"(tmp2)
    146   *c_Re = tmp1;
    159   LONG tmp1, tmp2;                    (in cplxMultAddDiv2(), local)
    166   : "=&r"(tmp1), "=&r"(tmp2)
    170   *c_Re += tmp1;
    184   LONG tmp1, tmp2;                    (in cplxMultDiv2(), local)
    [all …]

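The cplxMultDiv2() variants in this header compute c = (a · b) / 2 for fixed-point complex operands, keeping only the high half of each 32x32 product; the "=&r"(tmp1), "=&r"(tmp2) output constraints above receive those two high words (cplxMultAddDiv2() accumulates instead, hence the += on line 170). A portable C sketch of the arithmetic, assuming Q31 operands for the 32-bit LONG type; the real header selects per-CPU inline asm:

    #include <stdint.h>

    static void cplx_mult_div2(int32_t *c_Re, int32_t *c_Im,
                               int32_t a_Re, int32_t a_Im,
                               int32_t b_Re, int32_t b_Im)
    {
        /* Q31*Q31 -> Q62; taking the top 32 bits yields (a*b)/2 in Q31. */
        int64_t re = (int64_t)a_Re * b_Re - (int64_t)a_Im * b_Im;
        int64_t im = (int64_t)a_Re * b_Im + (int64_t)a_Im * b_Re;
        *c_Re = (int32_t)(re >> 32);
        *c_Im = (int32_t)(im >> 32);
    }
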
/external/wpa_supplicant_8/src/crypto/
  milenage.c
     39   u8 tmp1[16], tmp2[16], tmp3[16];    (in milenage_f1(), local)
     44   tmp1[i] = _rand[i] ^ opc[i];
     45   if (aes_128_encrypt_block(k, tmp1, tmp1))
     60   tmp3[i] ^= tmp1[i];
     64   if (aes_128_encrypt_block(k, tmp3, tmp1))
     67   tmp1[i] ^= opc[i];
     69   os_memcpy(mac_a, tmp1, 8); /* f1 */
     71   os_memcpy(mac_s, tmp1 + 8, 8); /* f1* */
     91   u8 tmp1[16], tmp2[16], tmp3[16];    (in milenage_f2345(), local)
     96   tmp1[i] = _rand[i] ^ opc[i];
    [all …]

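For orientation, milenage_f1() above is the MILENAGE network-authentication function f1/f1* (3GPP TS 35.206): whiten RAND with OPc, AES-encrypt, mix in SQN||AMF, encrypt again, un-whiten, then split the result into MAC-A and MAC-S. A compressed sketch around the matched lines; f1_sketch and its buffer-passing convention are illustrative, the SQN/AMF rotation step is elided, and aes_128_encrypt_block() is the tree's own AES primitive:

    #include <string.h>

    typedef unsigned char u8;

    /* Declared in this tree's aes_wrap.h. */
    int aes_128_encrypt_block(const u8 *key, const u8 *in, u8 *out);

    int f1_sketch(const u8 *opc, const u8 *k, const u8 *_rand,
                  u8 *in1,          /* 16 bytes: SQN||AMF material xor OPc, rotated */
                  u8 *mac_a, u8 *mac_s)
    {
        u8 tmp1[16];
        int i;

        for (i = 0; i < 16; i++)
            tmp1[i] = _rand[i] ^ opc[i];           /* line 44 */
        if (aes_128_encrypt_block(k, tmp1, tmp1))  /* line 45 */
            return -1;
        for (i = 0; i < 16; i++)
            in1[i] ^= tmp1[i];                     /* line 60 */
        if (aes_128_encrypt_block(k, in1, tmp1))   /* line 64 */
            return -1;
        for (i = 0; i < 16; i++)
            tmp1[i] ^= opc[i];                     /* line 67 */
        memcpy(mac_a, tmp1, 8);                    /* f1; os_memcpy() upstream  */
        memcpy(mac_s, tmp1 + 8, 8);                /* f1* */
        return 0;
    }
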