/external/chromium_org/third_party/webrtc/common_audio/signal_processing/
D | resample_by_2_internal.c | hits in WebRtcSpl_DownBy2IntToShort():
     34  int32_t tmp0, tmp1, diff;  (local)
     46  tmp1 = state[0] + diff * kResampleAllpass[1][0];
     48  diff = tmp1 - state[2];
     54  state[1] = tmp1;
     76  tmp1 = state[4] + diff * kResampleAllpass[0][0];
     78  diff = tmp1 - state[6];
     84  state[5] = tmp1;
    104  tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
    110  if (tmp1 > (int32_t)0x00007FFF)
    111    tmp1 = 0x00007FFF;
    [all …]

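The hits trace a fixed-point allpass decimator: each "tmp1 = state[n] + diff * coeff" / "diff = tmp1 - state[n+2]" pair is one first-order allpass section, and lines 110-111 saturate the 32-bit accumulator to the int16 range. A minimal C sketch of that pattern; the coefficient value and two-slot state layout are illustrative assumptions, not the WebRTC originals:

    #include <stdint.h>

    /* One first-order allpass section followed by int16 saturation.
     * kCoeff and the two-element state stand in for the
     * kResampleAllpass[][] entries and state[] slots in the hits. */
    static int16_t allpass_step_sat(int32_t in, int32_t state[2], int32_t kCoeff)
    {
        int32_t diff = in - state[1];             /* input minus delayed output */
        int32_t tmp1 = state[0] + diff * kCoeff;  /* allpass accumulate */
        state[0] = in;                            /* update delay line */
        state[1] = tmp1;
        if (tmp1 > (int32_t)0x00007FFF)           /* saturate, as lines 110-111 do */
            tmp1 = 0x00007FFF;
        if (tmp1 < (int32_t)0xFFFF8000)
            tmp1 = (int32_t)0xFFFF8000;
        return (int16_t)tmp1;
    }
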
/external/webrtc/src/common_audio/signal_processing/
D | resample_by_2_internal.c | hits in WebRtcSpl_DownBy2IntToShort()
    (the same routine as the Chromium copy above, using the legacy
    WebRtc_Word32 typedef in place of int32_t):
     34  WebRtc_Word32 tmp0, tmp1, diff;  (local)
     46  tmp1 = state[0] + diff * kResampleAllpass[1][0];
     48  diff = tmp1 - state[2];
     54  state[1] = tmp1;
     76  tmp1 = state[4] + diff * kResampleAllpass[0][0];
     78  diff = tmp1 - state[6];
     84  state[5] = tmp1;
    104  tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
    110  if (tmp1 > (WebRtc_Word32)0x00007FFF)
    111    tmp1 = 0x00007FFF;
    [all …]

/external/libunwind/src/dwarf/
D | Gexpr.c | hits in dwarf_eval_expr():
    193  unw_word_t operand1 = 0, operand2 = 0, tmp1, tmp2, tmp3, end_addr;  (local)
    296  &tmp1)) < 0)
    298  push (tmp1 + operand1);
    305  dwarf_to_unw_regnum (operand1), &tmp1)) < 0)
    307  push (tmp1 + operand2);
    367  tmp1 = pop ();
    368  if ((ret = dwarf_readw (as, a, &tmp1, &tmp2, arg)) < 0)
    375  tmp1 = pop ();
    384  if ((ret = dwarf_readu8 (as, a, &tmp1, &u8, arg)) < 0)
    390  if ((ret = dwarf_readu16 (as, a, &tmp1, &u16, arg)) < 0)
    [all …]

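The hits show dwarf_eval_expr() is a straight stack machine: operands pop into temporaries, memory is fetched through dwarf_readw()/dwarf_readu8()/dwarf_readu16(), and results are pushed back. A minimal C sketch of that core; the opcode names, stack size, and read_word() helper are illustrative stand-ins, not libunwind's definitions:

    #include <stdint.h>
    #include <stddef.h>

    enum { OP_PLUS_UCONST, OP_DEREF };        /* hypothetical opcode subset */

    typedef struct {
        uint64_t slots[64];
        size_t   depth;
    } eval_stack;

    static void     push(eval_stack *s, uint64_t v) { s->slots[s->depth++] = v; }
    static uint64_t pop (eval_stack *s)             { return s->slots[--s->depth]; }

    /* Stand-in for dwarf_readw(): fetch a word at the given address. */
    static int read_word(uint64_t addr, uint64_t *out)
    {
        *out = *(const uint64_t *)(uintptr_t)addr;
        return 0;
    }

    static int eval_op(eval_stack *s, int op, uint64_t operand1)
    {
        uint64_t tmp1, tmp2;
        switch (op) {
        case OP_PLUS_UCONST:          /* cf. "push (tmp1 + operand1)" */
            tmp1 = pop(s);
            push(s, tmp1 + operand1);
            return 0;
        case OP_DEREF:                /* cf. lines 367-368 */
            tmp1 = pop(s);
            if (read_word(tmp1, &tmp2) < 0)
                return -1;
            push(s, tmp2);
            return 0;
        }
        return -1;
    }
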
/external/llvm/test/CodeGen/ARM/
D | vst4.ll |
      7  %tmp1 = load <8 x i8>* %B
      8  …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
     17  %tmp1 = load <8 x i8>* %B
     18  …l void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
     29  %tmp1 = load <4 x i16>* %B
     30  … @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16>…
     39  %tmp1 = load <2 x i32>* %B
     40  … @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32>…
     48  %tmp1 = load <2 x float>* %B
     49  …m.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x flo…
    [all …]

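For reference, what the @llvm.arm.neon.vst4.* intrinsics under test store: four source vectors written to memory element-interleaved (a0 b0 c0 d0 a1 b1 ...). A scalar C sketch of the v8i8 case; the function name is ours, not an LLVM or NEON API:

    #include <stdint.h>

    static void vst4_v8i8_ref(uint8_t *out, const uint8_t a[8],
                              const uint8_t b[8], const uint8_t c[8],
                              const uint8_t d[8])
    {
        for (int i = 0; i < 8; ++i) {   /* one 4-element group per lane */
            out[4 * i + 0] = a[i];
            out[4 * i + 1] = b[i];
            out[4 * i + 2] = c[i];
            out[4 * i + 3] = d[i];
        }
    }
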
D | vstlane.ll |
      7  %tmp1 = load <8 x i8>* %B
      8  %tmp2 = extractelement <8 x i8> %tmp1, i32 3
     18  %tmp1 = load <8 x i8>* %B
     19  %tmp2 = extractelement <8 x i8> %tmp1, i32 3
     30  %tmp1 = load <4 x i16>* %B
     31  %tmp2 = extractelement <4 x i16> %tmp1, i32 2
     40  %tmp1 = load <2 x i32>* %B
     41  %tmp2 = extractelement <2 x i32> %tmp1, i32 1
     49  %tmp1 = load <2 x float>* %B
     50  %tmp2 = extractelement <2 x float> %tmp1, i32 1
    [all …]

D | vshift.ll |
      6  %tmp1 = load <8 x i8>* %A
      8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
     15  %tmp1 = load <4 x i16>* %A
     17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
     24  %tmp1 = load <2 x i32>* %A
     26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
     33  %tmp1 = load <1 x i64>* %A
     35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
     42  %tmp1 = load <8 x i8>* %A
     43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    [all …]

D | vbits.ll |
      6  %tmp1 = load <8 x i8>* %A
      8  %tmp3 = and <8 x i8> %tmp1, %tmp2
     15  %tmp1 = load <4 x i16>* %A
     17  %tmp3 = and <4 x i16> %tmp1, %tmp2
     24  %tmp1 = load <2 x i32>* %A
     26  %tmp3 = and <2 x i32> %tmp1, %tmp2
     33  %tmp1 = load <1 x i64>* %A
     35  %tmp3 = and <1 x i64> %tmp1, %tmp2
     42  %tmp1 = load <16 x i8>* %A
     44  %tmp3 = and <16 x i8> %tmp1, %tmp2
    [all …]

D | vst3.ll |
      8  %tmp1 = load <8 x i8>* %B
      9  call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
     17  %tmp1 = load <4 x i16>* %B
     18  …call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, …
     26  %tmp1 = load <2 x i32>* %B
     27  …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
     37  %tmp1 = load <2 x i32>* %B
     38  …call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, …
     48  %tmp1 = load <2 x float>* %B
     49  …void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1,…
    [all …]

D | uxtb.ll |
      5  %tmp1 = and i32 %x, 16711935     ; <i32> [#uses=1]
      6  ret i32 %tmp1
     10  %tmp1 = lshr i32 %x, 8           ; <i32> [#uses=1]
     11  %tmp2 = and i32 %tmp1, 16711935  ; <i32> [#uses=1]
     16  %tmp1 = lshr i32 %x, 8           ; <i32> [#uses=1]
     17  %tmp2 = and i32 %tmp1, 16711935  ; <i32> [#uses=1]
     22  %tmp1 = lshr i32 %x, 8           ; <i32> [#uses=1]
     23  %tmp6 = and i32 %tmp1, 16711935  ; <i32> [#uses=1]
     28  %tmp1 = lshr i32 %x, 8           ; <i32> [#uses=1]
     29  %tmp2 = and i32 %tmp1, 16711935  ; <i32> [#uses=1]
    [all …]

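The mask 16711935 is 0x00FF00FF, so the repeated lshr-then-and pattern extracts bytes 1 and 3 of the word and zero-extends each into a 16-bit lane, which is the operation these tests expect to match ARM's UXTB16 (with rotate). In C:

    #include <stdint.h>

    static uint32_t uxtb16_ror8(uint32_t x)
    {
        return (x >> 8) & 0x00FF00FFu;   /* 16711935 == 0x00FF00FF */
    }
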
D | vneg.ll |
      6  %tmp1 = load <8 x i8>* %A
      7  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
     14  %tmp1 = load <4 x i16>* %A
     15  %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
     22  %tmp1 = load <2 x i32>* %A
     23  %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
     30  %tmp1 = load <2 x float>* %A
     31  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
     38  %tmp1 = load <16 x i8>* %A
     39  %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
    [all …]

D | vst2.ll |
      7  %tmp1 = load <8 x i8>* %B
      8  call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
     17  %tmp1 = load <8 x i8>* %B
     18  call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
     29  %tmp1 = load <4 x i16>* %B
     30  call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
     38  %tmp1 = load <2 x i32>* %B
     39  call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
     47  %tmp1 = load <2 x float>* %B
     48  call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
    [all …]

/external/openssl/crypto/sha/asm/
D | sha512-sparcv9.pl |
    109  $tmp1="%g4";
    141  srlx @X[$j+1],$tmp32,$tmp1
    143  or $tmp1,@X[$j],@X[$j]
    178  sllx @pair[0],$tmp0,$tmp1
    181  or $tmp1,$tmp2,$tmp2
    196  sllx @pair[0],$tmp0,$tmp1
    199  or $tmp1,$tmp2,$tmp2
    224  $SLL $e,`$SZ*8-@Sigma1[2]`,$tmp1
    227  xor $tmp1,$h,$h
    228  $SLL $e,`$SZ*8-@Sigma1[1]`,$tmp1
    [all …]

D | sha1-sparcv9.pl |
     49  $tmp1="%i4";
     59  srl $a,27,$tmp1
     62  add $tmp1,$e,$e
     64  andn $d,$b,$tmp1
     66  or $tmp1,$tmp0,$tmp1
     75  add $tmp1,$e,$e
     87  srl $a,27,$tmp1
     93  srlx @X[($j+7)%8],32,$tmp1
     96  or $tmp1,$Xi,$Xi
    103  srl $a,27,$tmp1 !!
    [all …]

D | sha1-sparcv9a.pl |
     50  $tmp1="%i4";
    154  srl $a,27,$tmp1
    158  add $tmp1,$e,$e
    159  andn $d,$b,$tmp1
    163  or $tmp1,$tmp3,$tmp1
    165  add $tmp1,$e,$e
    173  srl $a,27,$tmp1
    177  add $tmp1,$e,$e
    179  andn $d,$b,$tmp1
    183  or $tmp1,$tmp3,$tmp1
    [all …]

D | sha1-586.pl |
    128  $tmp1="ebp";
    143  if ($n==0) { &mov($tmp1,$a); }
    144  else { &mov($a,$tmp1); }
    145  &rotl($tmp1,5);                         # tmp1=ROTATE(a,5)
    147  &add($tmp1,$e);                         # tmp1+=e;
    154  &lea($tmp1,&DWP(0x5a827999,$tmp1,$e));  # tmp1+=K_00_19+xi
    157  &add($f,$tmp1); }                       # f+=tmp1
    158  else { &add($tmp1,$f); }                # f becomes a in next round
    159  &mov($tmp1,$a) if ($alt && $n==15);
    171  &and($tmp1,$c);                         # tmp1 to hold F_00_19(b,c,d), b&=c^d
    [all …]

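Across all three SHA-1 generators the same round shape recurs: srl $a,27 combined with a 5-bit left shift is ROTATE(a,5), andn $d,$b or'ed with $b&$c builds F_00_19(b,c,d) = (b&c)|(~b&d), and 0x5a827999 (line 154 of sha1-586.pl) is K_00_19. A plain-C statement of that round, assuming the standard SHA-1 definition rather than any one generator's register choreography:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

    /* e += ROTATE(a,5) + F_00_19(b,c,d) + K_00_19 + W[t], for t = 0..19 */
    static uint32_t sha1_round_00_19(uint32_t a, uint32_t b, uint32_t c,
                                     uint32_t d, uint32_t e, uint32_t w)
    {
        uint32_t f = (b & c) | (~b & d);   /* cf. andn/and/or in the sparc code */
        return e + rotl32(a, 5) + f + 0x5a827999u + w;
    }
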
/external/llvm/test/CodeGen/AArch64/
D | neon-bitwise-instructions.ll |
      6  %tmp1 = and <8 x i8> %a, %b;
      7  ret <8 x i8> %tmp1
     13  %tmp1 = and <16 x i8> %a, %b;
     14  ret <16 x i8> %tmp1
     21  %tmp1 = or <8 x i8> %a, %b;
     22  ret <8 x i8> %tmp1
     28  %tmp1 = or <16 x i8> %a, %b;
     29  ret <16 x i8> %tmp1
     36  %tmp1 = xor <8 x i8> %a, %b;
     37  ret <8 x i8> %tmp1
    [all …]

D | neon-scalar-by-elem-fma.ll |
      9  %tmp1 = extractelement <4 x float> %v, i32 3
     10  %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
     17  %tmp1 = extractelement <4 x float> %v, i32 3
     18  %tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
     25  %tmp1 = extractelement <2 x float> %v, i32 1
     26  %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
     33  %tmp1 = extractelement <1 x double> %v, i32 0
     34  %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
     41  %tmp1 = extractelement <2 x double> %v, i32 1
     42  %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
    [all …]

D | arm64-neon-scalar-by-elem-mul.ll |
      6  %tmp1 = extractelement <2 x float> %v, i32 1
      7  %tmp2 = fmul float %a, %tmp1;
     14  %tmp1 = extractelement <2 x float> %v, i32 1
     15  %tmp2 = fmul float %tmp1, %a;
     23  %tmp1 = extractelement <4 x float> %v, i32 3
     24  %tmp2 = fmul float %a, %tmp1;
     31  %tmp1 = extractelement <4 x float> %v, i32 3
     32  %tmp2 = fmul float %tmp1, %a;
     40  %tmp1 = extractelement <1 x double> %v, i32 0
     41  %tmp2 = fmul double %a, %tmp1;
    [all …]

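Both scalar-by-element tests reduce to the same scalar operation: pull one lane out of a vector, then multiply, optionally fused with an accumulate. A C sketch using the C99 fmaf() function; the function name and lane-array signature are illustrative:

    #include <math.h>

    /* b * v[lane] + a with a single rounding, as @llvm.fma.f32 requires. */
    static float fmla_lane(float a, float b, const float v[4], int lane)
    {
        return fmaf(b, v[lane], a);
    }
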
/external/chromium_org/third_party/openmax_dl/dl/sp/src/mips/
D | mips_FFTFwd_RToCCS_F32_real.c | hits in mips_FFTFwd_RToCCS_F32_real():
     23  OMX_F32 tmp1, tmp2, tmp3, tmp4;  (local)
     32  tmp1 = pSrc[p_bitrev[0]] + pSrc[p_bitrev[1]];
     37  p_dst[0].Re = tmp1 + tmp2;
     38  p_dst[2].Re = tmp1 - tmp2;
     57  tmp1 = pSrc[p_bitrev[0]] + pSrc[p_bitrev[1]];
     62  p_tmp[0].Re = tmp1 + tmp2;
     63  p_tmp[2].Re = tmp1 - tmp2;
     86  tmp1 = pSrc[p_bitrev[4]] + pSrc[p_bitrev[5]];
     88  tmp3 = tmp1 + tmp2;
     89  tmp4 = tmp1 - tmp2;
    [all …]

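The sum/difference pairs are radix-2 FFT butterflies over bit-reversed input: each stage combines two partial sums into (a + b, a - b). A minimal sketch of one such step; the bitrev indexing and plain float type stand in for p_bitrev[] and OMX_F32:

    /* One radix-2 butterfly stage over bit-reversed input samples. */
    static void butterfly2(const float *src, const int *bitrev,
                           float *even_out, float *odd_out)
    {
        float tmp1 = src[bitrev[0]] + src[bitrev[1]];
        float tmp2 = src[bitrev[2]] + src[bitrev[3]];
        *even_out = tmp1 + tmp2;   /* cf. p_dst[0].Re */
        *odd_out  = tmp1 - tmp2;   /* cf. p_dst[2].Re */
    }
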
/external/openssl/crypto/md5/asm/
D | md5-586.pl |
     20  $tmp1="edi";
     47  &mov($tmp1,$C) if $pos < 0;
     53  &xor($tmp1,$d);                    # F function - part 2
     55  &and($tmp1,$b);                    # F function - part 3
     58  &xor($tmp1,$d);                    # F function - part 4
     60  &add($a,$tmp1);
     61  &mov($tmp1,&Np($c)) if $pos < 1;   # next tmp1 for R0
     62  &mov($tmp1,&Np($c)) if $pos == 1;  # next tmp1 for R1
     79  &xor($tmp1,$b);                    # G function - part 2
     80  &and($tmp1,$d);                    # G function - part 3
    [all …]

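The commented "F function" parts compute MD5's F without a NOT: ((c ^ d) & b) ^ d is bit-for-bit equal to (b & c) | (~b & d), and the "G function" hits apply the same trick with the roles of b and d exchanged. In C:

    #include <stdint.h>

    static uint32_t md5_F(uint32_t b, uint32_t c, uint32_t d)
    {
        return ((c ^ d) & b) ^ d;   /* parts 2-4: xor, and, xor */
    }

    static uint32_t md5_G(uint32_t b, uint32_t c, uint32_t d)
    {
        return ((b ^ c) & d) ^ c;   /* == (b & d) | (c & ~d) */
    }
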
/external/chromium_org/third_party/boringssl/src/crypto/md5/asm/
D | md5-586.pl | (the same generator vendored into BoringSSL; hits are
    identical to the OpenSSL copy above)
     20  $tmp1="edi";
     47  &mov($tmp1,$C) if $pos < 0;
     53  &xor($tmp1,$d);                    # F function - part 2
     55  &and($tmp1,$b);                    # F function - part 3
     58  &xor($tmp1,$d);                    # F function - part 4
     60  &add($a,$tmp1);
     61  &mov($tmp1,&Np($c)) if $pos < 1;   # next tmp1 for R0
     62  &mov($tmp1,&Np($c)) if $pos == 1;  # next tmp1 for R1
     79  &xor($tmp1,$b);                    # G function - part 2
     80  &and($tmp1,$d);                    # G function - part 3
    [all …]

/external/llvm/test/CodeGen/PowerPC/
D | bswap-load-store.ll |
      8  %tmp1 = getelementptr i8* %ptr, i32 %off     ; <i8*> [#uses=1]
      9  %tmp1.upgrd.1 = bitcast i8* %tmp1 to i32*    ; <i32*> [#uses=1]
     11  store i32 %tmp13, i32* %tmp1.upgrd.1
     16  %tmp1 = getelementptr i8* %ptr, i32 %off     ; <i8*> [#uses=1]
     17  %tmp1.upgrd.2 = bitcast i8* %tmp1 to i32*    ; <i32*> [#uses=1]
     18  %tmp = load i32* %tmp1.upgrd.2               ; <i32> [#uses=1]
     24  %tmp1 = getelementptr i8* %ptr, i32 %off     ; <i8*> [#uses=1]
     25  %tmp1.upgrd.3 = bitcast i8* %tmp1 to i16*    ; <i16*> [#uses=1]
     27  store i16 %tmp5, i16* %tmp1.upgrd.3
     32  %tmp1 = getelementptr i8* %ptr, i32 %off     ; <i8*> [#uses=1]
    [all …]

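The test builds byte-swapped loads and stores at ptr + off, which on PowerPC should fold into the byte-reversed load/store instructions (lwbrx/stwbrx). A scalar C equivalent of the store side, using the GCC/Clang __builtin_bswap32 builtin:

    #include <stdint.h>
    #include <string.h>

    static void store_swapped_u32(uint8_t *ptr, int32_t off, uint32_t v)
    {
        uint32_t swapped = __builtin_bswap32(v);
        memcpy(ptr + off, &swapped, sizeof swapped);   /* unaligned-safe */
    }
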
/external/openssl/crypto/bf/asm/
D | bf-586.pl |
     15  $tmp1="eax";
     44  &xor( $tmp1, $tmp1);
     58  &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
     62  &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
     77  &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
     80  &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
     87  &mov(&DWP(4,$tmp1,"",0),$L);
     89  &mov(&DWP(0,$tmp1,"",0),$R);
     95  local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_;
    105  &movb( &LB($tmp1), &HB($tmp2));   # A
    [all …]

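The alternating BF_ENCRYPT($R,$L) / BF_ENCRYPT($L,$R) calls are unrolled Feistel rounds, and the byte extraction at line 105 feeds Blowfish's four S-box lookups. A C sketch of one round under the standard Blowfish definition F(x) = ((S0[a] + S1[b]) ^ S2[c]) + S3[d]; the exact register choreography of BF_ENCRYPT is not reproduced here:

    #include <stdint.h>

    /* S and P are the initialized Blowfish key schedule (assumed). */
    static void bf_round(uint32_t *L, uint32_t *R,
                         const uint32_t S[4][256], uint32_t P_i)
    {
        uint32_t x, f;
        *L ^= P_i;
        x = *L;
        f = ((S[0][x >> 24] + S[1][(x >> 16) & 0xff])
             ^ S[2][(x >> 8) & 0xff]) + S[3][x & 0xff];
        *R ^= f;
        /* the caller swaps L and R between rounds, as the alternating
           argument order in the calls above does implicitly */
    }
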
/external/pdfium/core/src/fxcodec/jbig2/
D | JBig2_Image.cpp | hits in composeTo_opt2():
    780  FX_DWORD s1, d1, d2, shift, shift1, shift2, tmp, tmp1, tmp2, maskL, maskR, maskM;  (local)
    839  tmp1 = JBIG2_GETDWORD(lineSrc) << shift;
    843  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);
    846  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);
    849  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);
    852  tmp = (tmp2 & ~maskM) | ((~(tmp1 ^ tmp2)) & maskM);
    855  tmp = (tmp2 & ~maskM) | (tmp1 & maskM);
    868  tmp1 = JBIG2_GETDWORD(lineSrc) >> shift;
    872  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);
    875  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);
    [all …]

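Each of the five tmp = ... lines is one masked compose step: bits outside maskM keep the destination word tmp2, and bits inside become src OP dst for the JBIG2 compose operator in effect (OR, AND, XOR, XNOR, REPLACE). A generic C sketch; the enum and function names are ours:

    #include <stdint.h>

    enum compose_op { COMPOSE_OR, COMPOSE_AND, COMPOSE_XOR,
                      COMPOSE_XNOR, COMPOSE_REPLACE };

    static uint32_t compose_word(uint32_t src, uint32_t dst,
                                 uint32_t mask, enum compose_op op)
    {
        uint32_t combined;
        switch (op) {
        case COMPOSE_OR:   combined = src | dst;    break;
        case COMPOSE_AND:  combined = src & dst;    break;
        case COMPOSE_XOR:  combined = src ^ dst;    break;
        case COMPOSE_XNOR: combined = ~(src ^ dst); break;
        default:           combined = src;          break;  /* replace */
        }
        return (dst & ~mask) | (combined & mask);
    }
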
/external/aac/libFDK/include/arm/
D | cplx_mul.h | hits in cplxMultDiv2() and cplxMultAddDiv2():
    110  LONG tmp1,tmp2;               (local, cplxMultDiv2)
    118  : "=&r"(tmp1), "=&r"(tmp2)
    122  *c_Re = tmp1;
    135  LONG tmp1, tmp2;              (local, cplxMultDiv2)
    142  : "=&r"(tmp1), "=&r"(tmp2)
    146  *c_Re = tmp1;
    159  LONG tmp1, tmp2;              (local, cplxMultAddDiv2)
    166  : "=&r"(tmp1), "=&r"(tmp2)
    170  *c_Re += tmp1;
    184  LONG tmp1, tmp2;              (local, cplxMultDiv2)
    [all …]

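cplxMultDiv2 is a fixed-point complex multiply whose result is pre-halved so the recombined products stay in range; the ARM variants above produce it with smull/smlal inline assembly, which is what the "=&r"(tmp1) output constraints belong to. A portable C stand-in, assuming Q31 operands (the >> 32 keeps the high word of the 64-bit product, i.e. the Q31 result divided by two); the fixed-point format is our assumption, not taken from the FDK headers:

    #include <stdint.h>

    static void cplx_mult_div2(int32_t *c_re, int32_t *c_im,
                               int32_t a_re, int32_t a_im,
                               int32_t b_re, int32_t b_im)
    {
        *c_re = (int32_t)(((int64_t)a_re * b_re - (int64_t)a_im * b_im) >> 32);
        *c_im = (int32_t)(((int64_t)a_re * b_im + (int64_t)a_im * b_re) >> 32);
    }
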