
Searched refs:tmp1 (Results 1 – 25 of 713) sorted by relevance


/external/webrtc/src/common_audio/signal_processing/
resample_by_2_internal.c
34 WebRtc_Word32 tmp0, tmp1, diff; in WebRtcSpl_DownBy2IntToShort() local
46 tmp1 = state[0] + diff * kResampleAllpass[1][0]; in WebRtcSpl_DownBy2IntToShort()
48 diff = tmp1 - state[2]; in WebRtcSpl_DownBy2IntToShort()
54 state[1] = tmp1; in WebRtcSpl_DownBy2IntToShort()
76 tmp1 = state[4] + diff * kResampleAllpass[0][0]; in WebRtcSpl_DownBy2IntToShort()
78 diff = tmp1 - state[6]; in WebRtcSpl_DownBy2IntToShort()
84 state[5] = tmp1; in WebRtcSpl_DownBy2IntToShort()
104 tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15; in WebRtcSpl_DownBy2IntToShort()
110 if (tmp1 > (WebRtc_Word32)0x00007FFF) in WebRtcSpl_DownBy2IntToShort()
111 tmp1 = 0x00007FFF; in WebRtcSpl_DownBy2IntToShort()
[all …]
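
The resample_by_2_internal.c hits above come from WebRTC's fixed-point decimator, which chains first-order allpass sections and finally saturates the 32-bit accumulator to the int16 range (the 0x00007FFF test on lines 110-111). A minimal C sketch of the two building blocks, with hypothetical names and simplified Q-format handling, not the library's actual routine:

    #include <stdint.h>

    /* One first-order allpass step of a polyphase halfband decimator:
     * out = delayed_in + coef * (in - delayed_out).  Hypothetical helper;
     * the real code spreads this across the state[] array and keeps
     * extra headroom before shifting. */
    static int32_t allpass_step(int32_t in, int32_t state[2], int32_t coef)
    {
        int32_t diff = in - state[1];
        int32_t out  = state[0] + diff * coef;  /* scaling simplified */
        state[0] = in;
        state[1] = out;
        return out;
    }

    /* Saturate to int16, as the excerpt does with the 0x00007FFF test. */
    static int16_t sat16(int32_t x)
    {
        if (x > 0x7FFF)  return 0x7FFF;
        if (x < -0x8000) return -0x8000;
        return (int16_t)x;
    }
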
/external/dropbear/libtommath/
bn_mp_toom_sqr.c
22 mp_int w0, w1, w2, w3, w4, tmp1, a0, a1, a2; in mp_toom_sqr() local
26 if ((res = mp_init_multi(&w0, &w1, &w2, &w3, &w4, &a0, &a1, &a2, &tmp1, NULL)) != MP_OKAY) { in mp_toom_sqr()
60 if ((res = mp_mul_2(&a0, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
63 if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
66 if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
69 if ((res = mp_add(&tmp1, &a2, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
73 if ((res = mp_sqr(&tmp1, &w1)) != MP_OKAY) { in mp_toom_sqr()
78 if ((res = mp_mul_2(&a2, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
81 if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
84 if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) { in mp_toom_sqr()
[all …]
bn_mp_toom_mul.c
27 mp_int w0, w1, w2, w3, w4, tmp1, tmp2, a0, a1, a2, b0, b1, b2; in mp_toom_mul() local
33 &b2, &tmp1, &tmp2, NULL)) != MP_OKAY) { in mp_toom_mul()
83 if ((res = mp_mul_2(&a0, &tmp1)) != MP_OKAY) { in mp_toom_mul()
86 if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) { in mp_toom_mul()
89 if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) { in mp_toom_mul()
92 if ((res = mp_add(&tmp1, &a2, &tmp1)) != MP_OKAY) { in mp_toom_mul()
109 if ((res = mp_mul(&tmp1, &tmp2, &w1)) != MP_OKAY) { in mp_toom_mul()
114 if ((res = mp_mul_2(&a2, &tmp1)) != MP_OKAY) { in mp_toom_mul()
117 if ((res = mp_add(&tmp1, &a1, &tmp1)) != MP_OKAY) { in mp_toom_mul()
120 if ((res = mp_mul_2(&tmp1, &tmp1)) != MP_OKAY) { in mp_toom_mul()
[all …]
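
Both Toom-3 routines above evaluate the three-way split operand a(x) = a0 + a1*x + a2*x^2 at small points using only cheap doublings and additions: the mp_mul_2/mp_add chain on tmp1 computes 4*a0 + 2*a1 + a2 (i.e. 4*a(1/2)), which is then squared into w1 (mp_toom_sqr) or multiplied by the matching evaluation of b(x) held in tmp2 (mp_toom_mul). The same chain with plain integers:

    /* Double-and-add evaluation used by the excerpts:
     * tmp1 = 2*a0; tmp1 += a1; tmp1 *= 2; tmp1 += a2
     * yields 4*a0 + 2*a1 + a2.  The real code does this on
     * multiprecision mp_int values with error checking. */
    unsigned long toom_eval(unsigned long a0, unsigned long a1, unsigned long a2)
    {
        unsigned long t = a0 << 1;  /* mp_mul_2(&a0, &tmp1)      */
        t += a1;                    /* mp_add(&tmp1, &a1, &tmp1) */
        t <<= 1;                    /* mp_mul_2(&tmp1, &tmp1)    */
        t += a2;                    /* mp_add(&tmp1, &a2, &tmp1) */
        return t;                   /* squared or cross-multiplied into w1 */
    }
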
/external/llvm/test/CodeGen/ARM/
vst4.ll
7 %tmp1 = load <8 x i8>* %B
8 …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %
17 %tmp1 = load <8 x i8>* %B
18 …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %
29 %tmp1 = load <4 x i16>* %B
30 … @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16>…
39 %tmp1 = load <2 x i32>* %B
40 … @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32>…
48 %tmp1 = load <2 x float>* %B
49 …m.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x flo…
[all …]
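
The vst4.ll tests check code generation for NEON's four-way interleaving store. In C the same operation is reachable through the arm_neon.h intrinsics; a minimal sketch, assuming an ARM target with NEON enabled:

    #include <arm_neon.h>

    /* Store one 8-byte vector into all four interleaved lanes,
     * mirroring the vst4.v8i8 calls in the test above. */
    void store_vst4(uint8_t *dst, const uint8_t *src)
    {
        uint8x8x4_t v;
        v.val[0] = v.val[1] = v.val[2] = v.val[3] = vld1_u8(src);
        vst4_u8(dst, v);  /* writes 32 bytes, byte-interleaved */
    }
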
vstlane.ll
7 %tmp1 = load <8 x i8>* %B
8 %tmp2 = extractelement <8 x i8> %tmp1, i32 3
18 %tmp1 = load <8 x i8>* %B
19 %tmp2 = extractelement <8 x i8> %tmp1, i32 3
30 %tmp1 = load <4 x i16>* %B
31 %tmp2 = extractelement <4 x i16> %tmp1, i32 2
40 %tmp1 = load <2 x i32>* %B
41 %tmp2 = extractelement <2 x i32> %tmp1, i32 1
49 %tmp1 = load <2 x float>* %B
50 %tmp2 = extractelement <2 x float> %tmp1, i32 1
[all …]
vbits.ll
6 %tmp1 = load <8 x i8>* %A
8 %tmp3 = and <8 x i8> %tmp1, %tmp2
15 %tmp1 = load <4 x i16>* %A
17 %tmp3 = and <4 x i16> %tmp1, %tmp2
24 %tmp1 = load <2 x i32>* %A
26 %tmp3 = and <2 x i32> %tmp1, %tmp2
33 %tmp1 = load <1 x i64>* %A
35 %tmp3 = and <1 x i64> %tmp1, %tmp2
42 %tmp1 = load <16 x i8>* %A
44 %tmp3 = and <16 x i8> %tmp1, %tmp2
[all …]
vshift.ll
6 %tmp1 = load <8 x i8>* %A
8 %tmp3 = shl <8 x i8> %tmp1, %tmp2
15 %tmp1 = load <4 x i16>* %A
17 %tmp3 = shl <4 x i16> %tmp1, %tmp2
24 %tmp1 = load <2 x i32>* %A
26 %tmp3 = shl <2 x i32> %tmp1, %tmp2
33 %tmp1 = load <1 x i64>* %A
35 %tmp3 = shl <1 x i64> %tmp1, %tmp2
42 %tmp1 = load <8 x i8>* %A
43 %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
[all …]
vst3.ll
8 %tmp1 = load <8 x i8>* %B
9 call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
17 %tmp1 = load <4 x i16>* %B
18 …call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, …
26 %tmp1 = load <2 x i32>* %B
27 …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
37 %tmp1 = load <2 x i32>* %B
38 …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
48 %tmp1 = load <2 x float>* %B
49 …void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1,…
[all …]
vshl.ll
6 %tmp1 = load <8 x i8>* %A
8 %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
15 %tmp1 = load <4 x i16>* %A
17 %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
24 %tmp1 = load <2 x i32>* %A
26 %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
33 %tmp1 = load <1 x i64>* %A
35 %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
42 %tmp1 = load <8 x i8>* %A
44 %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
[all …]
uxtb.ll
5 %tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
6 ret i32 %tmp1
10 %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
11 %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
16 %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
17 %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
22 %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
23 %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
28 %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
29 %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
[all …]
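
The constant 16711935 in uxtb.ll is 0x00FF00FF; masking with it, optionally after a logical shift right by 8, extracts two alternating bytes, a pattern the ARM backend can match to the UXTB16 instruction (plain, or with ROR #8). The C equivalents:

    #include <stdint.h>

    /* Bytes 0 and 2 of x: candidate for UXTB16 Rd, Rm. */
    uint32_t even_bytes(uint32_t x) { return x & 0x00FF00FFu; }

    /* Bytes 1 and 3 of x: candidate for UXTB16 Rd, Rm, ROR #8. */
    uint32_t odd_bytes(uint32_t x)  { return (x >> 8) & 0x00FF00FFu; }
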
vsub.ll
6 %tmp1 = load <8 x i8>* %A
8 %tmp3 = sub <8 x i8> %tmp1, %tmp2
15 %tmp1 = load <4 x i16>* %A
17 %tmp3 = sub <4 x i16> %tmp1, %tmp2
24 %tmp1 = load <2 x i32>* %A
26 %tmp3 = sub <2 x i32> %tmp1, %tmp2
33 %tmp1 = load <1 x i64>* %A
35 %tmp3 = sub <1 x i64> %tmp1, %tmp2
42 %tmp1 = load <2 x float>* %A
44 %tmp3 = fsub <2 x float> %tmp1, %tmp2
[all …]
vadd.ll
6 %tmp1 = load <8 x i8>* %A
8 %tmp3 = add <8 x i8> %tmp1, %tmp2
15 %tmp1 = load <4 x i16>* %A
17 %tmp3 = add <4 x i16> %tmp1, %tmp2
24 %tmp1 = load <2 x i32>* %A
26 %tmp3 = add <2 x i32> %tmp1, %tmp2
33 %tmp1 = load <1 x i64>* %A
35 %tmp3 = add <1 x i64> %tmp1, %tmp2
42 %tmp1 = load <2 x float>* %A
44 %tmp3 = fadd <2 x float> %tmp1, %tmp2
[all …]
vneg.ll
6 %tmp1 = load <8 x i8>* %A
7 %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
14 %tmp1 = load <4 x i16>* %A
15 %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
22 %tmp1 = load <2 x i32>* %A
23 %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
30 %tmp1 = load <2 x float>* %A
31 %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
38 %tmp1 = load <16 x i8>* %A
39 %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
[all …]
vst2.ll
7 %tmp1 = load <8 x i8>* %B
8 call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
17 %tmp1 = load <8 x i8>* %B
18 call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
29 %tmp1 = load <4 x i16>* %B
30 call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
38 %tmp1 = load <2 x i32>* %B
39 call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
47 %tmp1 = load <2 x float>* %B
48 call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
[all …]
vcvt.ll
6 %tmp1 = load <2 x float>* %A
7 %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
14 %tmp1 = load <2 x float>* %A
15 %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
22 %tmp1 = load <2 x i32>* %A
23 %tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
30 %tmp1 = load <2 x i32>* %A
31 %tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
38 %tmp1 = load <4 x float>* %A
39 %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
[all …]
/external/openssl/crypto/sha/asm/
sha512-sparcv9.pl
109 $tmp1="%g4";
141 srlx @X[$j+1],$tmp32,$tmp1
143 or $tmp1,@X[$j],@X[$j]
178 sllx @pair[0],$tmp0,$tmp1
181 or $tmp1,$tmp2,$tmp2
196 sllx @pair[0],$tmp0,$tmp1
199 or $tmp1,$tmp2,$tmp2
224 $SLL $e,`$SZ*8-@Sigma1[2]`,$tmp1
227 xor $tmp1,$h,$h
228 $SLL $e,`$SZ*8-@Sigma1[1]`,$tmp1
[all …]
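
SPARCv9 has no rotate instruction, so the SHA-512 Sigma rotations in sha512-sparcv9.pl are synthesized from sllx/srlx shift pairs combined with or, exactly the classic C idiom:

    #include <stdint.h>

    /* Rotate right by n (0 < n < 64): the srlx/sllx/or pattern above. */
    static uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* SHA-512 Sigma1 as three such rotations (FIPS 180-4 constants). */
    static uint64_t Sigma1(uint64_t e)
    {
        return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }
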
sha1-sparcv9.pl
49 $tmp1="%i4";
59 srl $a,27,$tmp1
62 add $tmp1,$e,$e
64 andn $d,$b,$tmp1
66 or $tmp1,$tmp0,$tmp1
75 add $tmp1,$e,$e
87 srl $a,27,$tmp1
93 srlx @X[($j+7)%8],32,$tmp1
96 or $tmp1,$Xi,$Xi
103 srl $a,27,$tmp1 !!
[all …]
sha1-sparcv9a.pl
50 $tmp1="%i4";
154 srl $a,27,$tmp1
158 add $tmp1,$e,$e
159 andn $d,$b,$tmp1
163 or $tmp1,$tmp3,$tmp1
165 add $tmp1,$e,$e
173 srl $a,27,$tmp1
177 add $tmp1,$e,$e
179 andn $d,$b,$tmp1
183 or $tmp1,$tmp3,$tmp1
[all …]
sha1-586.pl
128 $tmp1="ebp";
143 if ($n==0) { &mov($tmp1,$a); }
144 else { &mov($a,$tmp1); }
145 &rotl($tmp1,5); # tmp1=ROTATE(a,5)
147 &add($tmp1,$e); # tmp1+=e;
154 &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi
157 &add($f,$tmp1); } # f+=tmp1
158 else { &add($tmp1,$f); } # f becomes a in next round
159 &mov($tmp1,$a) if ($alt && $n==15);
171 &and($tmp1,$c); # tmp1 to hold F_00_19(b,c,d), b&=c^d
[all …]
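
The three SHA-1 files above all express the same round: e accumulates ROTL(a,5) (the srl ...,27 plus shift-left-5 pair), the round function, the message word, and the stage constant (0x5a827999 for rounds 0-19, folded in via lea on x86). A compact C rendering of one early round:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* One SHA-1 round for t = 0..19.  F(b,c,d) = (b & c) | (~b & d)
     * is what the andn/or (SPARC) and and/xor (x86) sequences compute.
     * s[0..4] holds the working state a..e. */
    static void sha1_round_00_19(uint32_t s[5], uint32_t w)
    {
        uint32_t f = (s[1] & s[2]) | (~s[1] & s[3]);
        uint32_t t = rotl32(s[0], 5) + f + s[4] + w + 0x5a827999u;
        s[4] = s[3]; s[3] = s[2]; s[2] = rotl32(s[1], 30);
        s[1] = s[0]; s[0] = t;
    }
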
/external/openssl/crypto/md5/asm/
md5-586.pl
20 $tmp1="edi";
47 &mov($tmp1,$C) if $pos < 0;
53 &xor($tmp1,$d); # F function - part 2
55 &and($tmp1,$b); # F function - part 3
58 &xor($tmp1,$d); # F function - part 4
60 &add($a,$tmp1);
61 &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
62 &mov($tmp1,&Np($c)) if $pos == 1; # next tmp1 for R1
79 &xor($tmp1,$b); # G function - part 2
80 &and($tmp1,$d); # G function - part 3
[all …]
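
The four "F function - part" comments above spell out MD5's round-1 function F(b,c,d) = (b & c) | (~b & d), computed branch-free with a single temporary via the identity F = d ^ (b & (c ^ d)). In C:

    #include <stdint.h>

    static uint32_t md5_F(uint32_t b, uint32_t c, uint32_t d)
    {
        uint32_t tmp = c;  /* mov tmp1, C         */
        tmp ^= d;          /* F function - part 2 */
        tmp &= b;          /* F function - part 3 */
        tmp ^= d;          /* F function - part 4 */
        return tmp;        /* == (b & c) | (~b & d) */
    }
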
/external/openssl/crypto/bf/asm/
bf-586.pl
15 $tmp1="eax";
44 &xor( $tmp1, $tmp1);
58 &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
62 &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
77 &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
80 &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
87 &mov(&DWP(4,$tmp1,"",0),$L);
89 &mov(&DWP(0,$tmp1,"",0),$R);
95 local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_;
105 &movb( &LB($tmp1), &HB($tmp2)); # A
[all …]
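
Each BF_ENCRYPT above is one Feistel round of Blowfish; the movb LB/HB instructions pull the four bytes out of the 32-bit half to index the S-boxes. The standard round function those lookups implement, as a C sketch (the S-box array is assumed to come from the key schedule):

    #include <stdint.h>

    /* Blowfish F: a..d are the bytes of x, most significant first;
     * additions are mod 2^32. */
    static uint32_t blowfish_F(const uint32_t S[4][256], uint32_t x)
    {
        uint8_t a = (uint8_t)(x >> 24), b = (uint8_t)(x >> 16);
        uint8_t c = (uint8_t)(x >> 8),  d = (uint8_t)x;
        return ((S[0][a] + S[1][b]) ^ S[2][c]) + S[3][d];
    }
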
/external/aac/libFDK/include/arm/
cplx_mul.h
110 LONG tmp1,tmp2; in cplxMultDiv2() local
118 : "=&r"(tmp1), "=&r"(tmp2) in cplxMultDiv2()
122 *c_Re = tmp1; in cplxMultDiv2()
135 LONG tmp1, tmp2; in cplxMultDiv2() local
142 : "=&r"(tmp1), "=&r"(tmp2) in cplxMultDiv2()
146 *c_Re = tmp1; in cplxMultDiv2()
159 LONG tmp1, tmp2; in cplxMultAddDiv2() local
166 : "=&r"(tmp1), "=&r"(tmp2) in cplxMultAddDiv2()
170 *c_Re += tmp1; in cplxMultAddDiv2()
184 LONG tmp1, tmp2; in cplxMultDiv2() local
[all …]
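
The cplx_mul.h variants wrap ARM inline assembly for a fixed-point complex multiply whose result is pre-halved to prevent overflow (hence the Div2 in the name). A portable C sketch of the assumed semantics for Q31 operands, keeping the top 32 bits of each 64-bit product:

    #include <stdint.h>

    /* c = (a * b) / 2 for Q31 complex values: a Q31xQ31 product is
     * Q62 in 64 bits, and >>32 yields the Q31 result divided by 2. */
    static void cplx_mult_div2(int32_t *c_re, int32_t *c_im,
                               int32_t a_re, int32_t a_im,
                               int32_t b_re, int32_t b_im)
    {
        *c_re = (int32_t)(((int64_t)a_re * b_re - (int64_t)a_im * b_im) >> 32);
        *c_im = (int32_t)(((int64_t)a_re * b_im + (int64_t)a_im * b_re) >> 32);
    }
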
/external/wpa_supplicant_8/src/crypto/
milenage.c
39 u8 tmp1[16], tmp2[16], tmp3[16]; in milenage_f1() local
44 tmp1[i] = _rand[i] ^ opc[i]; in milenage_f1()
45 if (aes_128_encrypt_block(k, tmp1, tmp1)) in milenage_f1()
60 tmp3[i] ^= tmp1[i]; in milenage_f1()
64 if (aes_128_encrypt_block(k, tmp3, tmp1)) in milenage_f1()
67 tmp1[i] ^= opc[i]; in milenage_f1()
69 os_memcpy(mac_a, tmp1, 8); /* f1 */ in milenage_f1()
71 os_memcpy(mac_s, tmp1 + 8, 8); /* f1* */ in milenage_f1()
91 u8 tmp1[16], tmp2[16], tmp3[16]; in milenage_f2345() local
96 tmp1[i] = _rand[i] ^ opc[i]; in milenage_f2345()
[all …]
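
milenage_f1 follows the 3GPP MILENAGE construction: XOR the challenge with OPc, encrypt under K, mix in the rotated SQN||AMF input, encrypt again, XOR with OPc, and split the 16-byte result into MAC-A (f1) and MAC-S (f1*). A schematic skeleton of that flow, with the rotation/constant details between the two encryptions elided and error handling omitted (f1_skeleton is a hypothetical name; aes_128_encrypt_block is the helper the excerpt calls):

    #include <string.h>

    typedef unsigned char u8;
    int aes_128_encrypt_block(const u8 *key, const u8 *in, u8 *out);

    void f1_skeleton(const u8 *k, const u8 *opc, const u8 *rand_chal,
                     const u8 *in1_mixed, u8 *mac_a, u8 *mac_s)
    {
        u8 tmp1[16];
        int i;

        for (i = 0; i < 16; i++)
            tmp1[i] = rand_chal[i] ^ opc[i];   /* RAND xor OPc  */
        aes_128_encrypt_block(k, tmp1, tmp1);  /* TEMP = E_K()  */

        for (i = 0; i < 16; i++)
            tmp1[i] ^= in1_mixed[i];           /* fold in SQN||AMF data */
        aes_128_encrypt_block(k, tmp1, tmp1);  /* OUT1 = E_K()  */

        for (i = 0; i < 16; i++)
            tmp1[i] ^= opc[i];                 /* final OPc xor */

        memcpy(mac_a, tmp1, 8);                /* f1  */
        memcpy(mac_s, tmp1 + 8, 8);            /* f1* */
    }
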
/external/valgrind/main/none/tests/s390x/
mul.h
5 unsigned long tmp1 = m1; \
12 : "+d" (tmp1), "+d" (tmp2) \
15 printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, m2, tmp1, tmp2); \
20 unsigned long tmp1 = m1; \
27 : "+d" (tmp1), "+d" (tmp2) \
30 printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, m2, tmp1, tmp2); \
35 unsigned long tmp1 = m1; \
42 : "+d" (tmp1), "+d" (tmp2) \
44 printf(#insn " %16.16lX * %16.16lX = %16.16lX%16.16lX\n", m1, (unsigned long) m2, tmp1, tmp2); \
49 unsigned long tmp1 = m1; \
[all …]
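
Each macro in mul.h drives an s390x multiply instruction on an even/odd register pair and prints the full 128-bit product. On compilers providing unsigned __int128, the reference result the test expects can be computed portably:

    #include <stdio.h>

    /* Portable 64x64->128 reference for the inline-asm multiply tests. */
    static void mul_reference(unsigned long m1, unsigned long m2)
    {
        unsigned __int128 p = (unsigned __int128)m1 * m2;
        printf("%16.16lX * %16.16lX = %16.16lX%16.16lX\n",
               m1, m2, (unsigned long)(p >> 64), (unsigned long)p);
    }
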
/external/openssl/crypto/des/asm/
crypt586.pl
79 local($r,$L,$R,$S,$trans,$u,$tmp1,$tmp2,$t)=@_;
90 &mov( $tmp1, $u);
91 &shl( $tmp1, 16); # 1
94 &xor( $u, $tmp1); # 2
96 &mov( $tmp1, &DWP(&n2a($S*4),$trans,"",0)); # 2
97 &xor( $u, $tmp1);
104 &xor( $tmp1, $tmp1); # 1
107 &movb( &LB($tmp1), &LB($u) );
111 &xor( $L, &DWP(" ",$trans,$tmp1,0));
112 &movb( &LB($tmp1), &LB($t) );
[all …]
