/external/libopus/celt/x86/

pitch_sse2.c
    48   __m128i inVec1_76543210, inVec1_FEDCBA98, acc1;   in celt_inner_prod_sse2() local
    54   acc1 = _mm_setzero_si128();   in celt_inner_prod_sse2()
    68   acc1 = _mm_add_epi32(acc1, inVec1_76543210);   in celt_inner_prod_sse2()
    72   acc1 = _mm_add_epi32( acc1, acc2 );   in celt_inner_prod_sse2()
    81   acc1 = _mm_add_epi32(acc1, inVec1_76543210);   in celt_inner_prod_sse2()
    85   acc1 = _mm_add_epi32(acc1, _mm_unpackhi_epi64( acc1, acc1));   in celt_inner_prod_sse2()
    86   acc1 = _mm_add_epi32(acc1, _mm_shufflelo_epi16( acc1, 0x0E));   in celt_inner_prod_sse2()
    87   sum += _mm_cvtsi128_si32(acc1);   in celt_inner_prod_sse2()

pitch_sse4_1.c
    50   __m128i inVec1_76543210, inVec1_FEDCBA98, acc1;   in celt_inner_prod_sse4_1() local
    57   acc1 = _mm_setzero_si128();   in celt_inner_prod_sse4_1()
    70   acc1 = _mm_add_epi32(acc1, inVec1_76543210);   in celt_inner_prod_sse4_1()
    74   acc1 = _mm_add_epi32(acc1, acc2);   in celt_inner_prod_sse4_1()
    83   acc1 = _mm_add_epi32(acc1, inVec1_76543210);   in celt_inner_prod_sse4_1()
    94   acc1 = _mm_add_epi32(acc1, inVec1_3210);   in celt_inner_prod_sse4_1()
    98   acc1 = _mm_add_epi32(acc1, _mm_unpackhi_epi64(acc1, acc1));   in celt_inner_prod_sse4_1()
    99   acc1 = _mm_add_epi32(acc1, _mm_shufflelo_epi16(acc1, 0x0E));   in celt_inner_prod_sse4_1()
   101   sum += _mm_cvtsi128_si32(acc1);   in celt_inner_prod_sse4_1()

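Note: both celt_inner_prod variants above follow the same shape — multiply 16-bit samples into 32-bit lanes, keep a running __m128i accumulator (acc1), then fold the four lanes to a scalar with _mm_unpackhi_epi64 / _mm_shufflelo_epi16 before extracting with _mm_cvtsi128_si32. A minimal self-contained sketch of that pattern (not the libopus code itself; assumes the length is a multiple of 8 and the per-lane sums do not overflow 32 bits):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Illustrative 16-bit inner product with the same horizontal reduction
     * as the hits above. */
    static int32_t inner_prod_i16_sse2(const int16_t *x, const int16_t *y, int n) {
      __m128i acc1 = _mm_setzero_si128();
      for (int i = 0; i < n; i += 8) {
        __m128i vx = _mm_loadu_si128((const __m128i *)(x + i));
        __m128i vy = _mm_loadu_si128((const __m128i *)(y + i));
        /* 16x16 -> 32 multiplies, adjacent pairs summed, accumulated per lane */
        acc1 = _mm_add_epi32(acc1, _mm_madd_epi16(vx, vy));
      }
      /* fold the upper 64 bits onto the lower, then lane 1 onto lane 0 */
      acc1 = _mm_add_epi32(acc1, _mm_unpackhi_epi64(acc1, acc1));
      acc1 = _mm_add_epi32(acc1, _mm_shufflelo_epi16(acc1, 0x0E));
      return _mm_cvtsi128_si32(acc1);
    }

The 0x0E immediate moves 32-bit lane 1 over lane 0, so one final add completes the reduction; the SILK entry further down does the same fold with 64-bit lanes via _mm_add_epi64 and _mm_storel_epi64.
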
/external/libldac/src/

sigana_fixp_ldac.c
   196   INT64 acc1, acc2;   in calc_mdct_pseudo_spectrum_ldac() local
   201   acc1 = (INT64)y1 * (INT64)y1;   in calc_mdct_pseudo_spectrum_ldac()
   203   acc1 = acc1 + acc2;   in calc_mdct_pseudo_spectrum_ldac()
   204   low_energy = acc1 >> LDAC_Q_ADD_LOWENERGY; /* Q26 <- (Q15 * Q15) >> 4 */   in calc_mdct_pseudo_spectrum_ldac()
   205   e = calc_exp_ldac((INT32)(acc1>>32), (UINT32)(acc1&0xffffffff));   in calc_mdct_pseudo_spectrum_ldac()
   206   tmp = (INT32)((acc1 << e) >> 32);   in calc_mdct_pseudo_spectrum_ldac()
   214   acc1 = (INT64)y1 * (INT64)y1;   in calc_mdct_pseudo_spectrum_ldac()
   216   acc1 = acc1 + acc2;   in calc_mdct_pseudo_spectrum_ldac()
   217   low_energy += acc1 >> LDAC_Q_ADD_LOWENERGY; /* Q26 <- (Q15 * Q15) >> 4 */   in calc_mdct_pseudo_spectrum_ldac()
   218   e = calc_exp_ldac((INT32)(acc1>>32), (UINT32)(acc1&0xffffffff));   in calc_mdct_pseudo_spectrum_ldac()
   [all …]

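Note: the calc_mdct_pseudo_spectrum_ldac hits show a common 64-bit fixed-point energy pattern — square Q15 values into a Q30 accumulator, then shift down to the target Q format (the source comment reads "Q26 <- (Q15 * Q15) >> 4") before normalising with calc_exp_ldac. A small sketch of just the accumulate-and-rescale step; the shift constant below is an assumption standing in for LDAC_Q_ADD_LOWENERGY, not a value taken from the LDAC headers:

    #include <stdint.h>

    #define Q_ADD_LOWENERGY 4  /* hypothetical stand-in for LDAC_Q_ADD_LOWENERGY */

    /* Square two Q15 samples into a 64-bit Q30 accumulator and rescale to Q26. */
    static int64_t pair_energy_q26(int32_t y1_q15, int32_t y2_q15) {
      int64_t acc1 = (int64_t)y1_q15 * (int64_t)y1_q15;  /* Q30 */
      int64_t acc2 = (int64_t)y2_q15 * (int64_t)y2_q15;  /* Q30 */
      acc1 += acc2;                                      /* still Q30 */
      return acc1 >> Q_ADD_LOWENERGY;                    /* Q26 */
    }
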
/external/boringssl/src/crypto/fipsmodule/bn/asm/

armv8-mont.pl
   279   my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("x$_",(19..26));
   328   mov $acc1,xzr
   374   adds $acc1,$acc1,$t0 // t[1]+lo(a[1]*a[0])
   388   stp $acc0,$acc1,[$tp],#8*2 // t[0..1]
   418   adc $acc1,xzr,xzr // t[9]
   429   adc $acc1,$acc1,$t1
   440   adcs $acc1,$acc1,$t2
   450   adcs $acc1,$acc1,$t2
   459   adcs $acc1,$acc1,$t2
   467   adcs $acc1,$acc1,$t1
   [all …]

x86-mont.pl
   142   $acc1="mm1";
   178   &movd ($acc1,&DWP(4,$np)); # np[1]
   187   &pmuludq($acc1,$mul1); # np[j]*m1
   189   &paddq ($car1,$acc1); # +=c1
   193   &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
   205   &pmuludq($acc1,$mul1); # np[num-1]*m1
   207   &paddq ($car1,$acc1); # +=c1
   241   &movd ($acc1,&DWP(4,$np)); # np[1]
   252   &pmuludq($acc1,$mul1); # np[j]*m1
   254   &paddq ($car1,$acc1); # +=c1
   [all …]

/external/boringssl/src/crypto/fipsmodule/ec/asm/

p256-x86_64-asm.pl
   151   my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
   200   mov %rdx, $acc1
   203   add %rax, $acc1
   235   add $t0, $acc1
   237   add %rax, $acc1
   256   add %rax, $acc1
   276   mov $acc1, $t0
   277   imulq %r15, $acc1
   285   mov $acc1, %rax
   291   mov $acc1, $t1
   [all …]

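Note: armv8-mont.pl, x86-mont.pl and p256-x86_64-asm.pl all use acc0..accN as the limbs of a wide accumulator, with mul/adds/adcs (or pmuludq/paddq) chains propagating carries during Montgomery multiplication. A scalar model of that accumulate-with-carry pattern — plain schoolbook 4x4-limb multiplication only, without the Montgomery reduction by the modulus — might look like this (assumes a compiler with unsigned __int128):

    #include <stdint.h>

    /* Multiply two 4-limb (256-bit) numbers into an 8-limb product, using a
     * 128-bit running accumulator in place of the register-plus-flags carry
     * chains the perlasm builds out of adds/adcs. */
    static void mul_4x4(uint64_t r[8], const uint64_t a[4], const uint64_t b[4]) {
      unsigned __int128 acc = 0;   /* current column plus incoming carry */
      uint64_t carry_hi = 0;       /* overflow of the 128-bit accumulator */
      for (int k = 0; k < 7; ++k) {
        for (int i = 0; i < 4; ++i) {
          int j = k - i;
          if (j < 0 || j > 3) continue;
          unsigned __int128 p = (unsigned __int128)a[i] * b[j];
          acc += p;
          if (acc < p) carry_hi++;   /* 128-bit wraparound becomes extra carry */
        }
        r[k] = (uint64_t)acc;
        acc = (acc >> 64) | ((unsigned __int128)carry_hi << 64);
        carry_hi = 0;
      }
      r[7] = (uint64_t)acc;          /* top limb is pure carry */
    }

Each column k collects the partial products a[i]*b[j] with i + j = k plus the carry from the previous column; the assembly does the same with one register per limb and the flags register carrying between adcs instructions.
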
/external/libopus/silk/fixed/x86/

vector_ops_FIX_sse4_1.c
    50   __m128i inVec1_76543210, acc1;   in silk_inner_prod16_aligned_64_sse4_1() local
    56   acc1 = _mm_setzero_si128();   in silk_inner_prod16_aligned_64_sse4_1()
    71   acc1 = _mm_add_epi64( acc1, xmm_tempa );   in silk_inner_prod16_aligned_64_sse4_1()
    75   acc1 = _mm_add_epi64( acc1, acc2 );   in silk_inner_prod16_aligned_64_sse4_1()
    78   acc2 = _mm_shuffle_epi32( acc1, _MM_SHUFFLE( 0, 0, 3, 2 ) );   in silk_inner_prod16_aligned_64_sse4_1()
    79   acc1 = _mm_add_epi64( acc1, acc2 );   in silk_inner_prod16_aligned_64_sse4_1()
    81   _mm_storel_epi64( (__m128i *)&sum, acc1 );   in silk_inner_prod16_aligned_64_sse4_1()

/external/boringssl/src/crypto/fipsmodule/aes/asm/

aes-x86_64.pl
    61   $acc1="%edi"; $maskfe="%rdi";
   104   movzb `&lo("$s1")`,$acc1
   107   mov 0($sbox,$acc1,8),$t1
   111   movzb `&hi("$s2")`,$acc1
   114   xor 3($sbox,$acc1,8),$t1
   129   movzb `&lo("$s3")`,$acc1
   132   xor 2($sbox,$acc1,8),$t1
   136   movzb `&hi("$s0")`,$acc1
   139   xor 1($sbox,$acc1,8),$t1
   143   movzb `&hi("$s1")`,$acc1
   [all …]

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/

depthwiseconv_uint8_3x3_filter.h
  5035   int32x4_t acc1;
  5040   acc1 = adjusted_bias_data;
  5045   acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
  5057   acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
  5058   acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
  5068   acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
  5069   acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
  5070       acc1, -output_shift);
  5079   vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
  5136   acc1 = adjusted_bias_data;
  [all …]

depthwiseconv_uint8_transitional.h
  3060   int32x4_t acc1;
  3065   acc1 = adjusted_bias_data;
  3070   acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
  3082   acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
  3083   acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
  3093   acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
  3094   acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
  3095       acc1, -output_shift);
  3104   vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
  3161   acc1 = adjusted_bias_data;
  [all …]

depthwiseconv_uint8.h
  1869   int32x4_t acc1 = vld1q_s32(acc_buffer + i + 4);
  1873   acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
  1876   acc1 = RoundingDivideByPOT(acc1, -output_shift);
  1882   acc1 = vmulq_n_s32(acc1, multiplier_power_of_two);
  1883   acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
  1887   acc1 = vaddq_s32(acc1, output_offset_vec);
  1890   acc1 = vmaxq_s32(acc1, output_activation_min_vec);
  1892   acc1 = vminq_s32(acc1, output_activation_max_vec);
  1895   const int16x4_t acc1_s16 = vqmovn_s32(acc1);

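Note: the depthwiseconv hits finish each int32 accumulator with the same requantization sequence — vqrdmulhq_n_s32 by the quantized output_multiplier, a rounding divide by a power of two, add the output offset, clamp to the activation range, and narrow with vqmovn. A scalar model of that arithmetic (gemmlowp-style reference behaviour; parameter names are illustrative, and it assumes arithmetic right shift on negative values):

    #include <stdint.h>

    /* Saturating rounding doubling high multiply, as vqrdmulhq_n_s32 does per lane. */
    static int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b) {
      if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;  /* only overflow case */
      int64_t ab = (int64_t)a * (int64_t)b;
      int64_t nudge = ab >= 0 ? (1LL << 30) : (1 - (1LL << 30));
      return (int32_t)((ab + nudge) >> 31);
    }

    /* Rounding divide by 2^exponent (round half away from zero). */
    static int32_t rounding_divide_by_pot(int32_t x, int exponent) {
      int32_t mask = (int32_t)((1LL << exponent) - 1);
      int32_t remainder = x & mask;
      int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
      return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
    }

    /* shift is the right-shift amount (the headers pass -output_shift). */
    static uint8_t requantize(int32_t acc, int32_t multiplier, int shift,
                              int32_t output_offset, int32_t qmin, int32_t qmax) {
      acc = sat_rounding_doubling_high_mul(acc, multiplier);
      acc = rounding_divide_by_pot(acc, shift);
      acc += output_offset;
      if (acc < qmin) acc = qmin;
      if (acc > qmax) acc = qmax;
      return (uint8_t)acc;
    }

For example, requantize(acc, multiplier, shift, 128, 0, 255) maps a raw int32 accumulator to a uint8 output value under a typical quantization setup.
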
/external/tensorflow/tensorflow/contrib/recurrent/python/kernel_tests/

recurrent_test.py
   167   acc1, final1 = recurrent.Recurrent(
   173   assert isinstance(acc1, _ElmanState)
   175   acc1, final1 = acc1.h, final1.h
   176   loss1 = math_ops.reduce_sum(acc1) + math_ops.reduce_sum(final1)
   181   (acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0,
   183   [acc0, acc1, final0, final1, dw0, dw1, db0, db1, dh0, dh1, di0, di1])
   184   self.assertAllClose(acc0, acc1)

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

fdot2.ll
    37   %acc1 = fadd half %mul2, %acc
    38   %acc2 = fadd half %mul1, %acc1
    78   %acc1 = fadd float %mul2, %acc
    79   %acc2 = fadd float %mul1, %acc1
   117   %acc1 = fadd float %mul2, %acc
   118   %acc2 = fadd float %mul1, %acc1
   154   %acc1 = fadd float %mul2, %acc
   155   %acc2 = fadd float %mul1, %acc1
   191   %acc1 = fadd float %mul2, %acc
   192   %acc2 = fadd float %mul1, %acc1
   [all …]

/external/libaom/libaom/av1/encoder/mips/msa/

temporal_filter_msa.c
    31   v4i32 acc0, acc1, acc2, acc3;   in temporal_filter_apply_8size_msa() local
    46   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_8size_msa()
    87   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_8size_msa()
    95   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_8size_msa()
   133   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_8size_msa()
   157   v4i32 acc0, acc1, acc2, acc3;   in temporal_filter_apply_16size_msa() local
   172   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_16size_msa()
   211   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_16size_msa()
   219   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_16size_msa()
   258   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_16size_msa()

/external/libvpx/libvpx/vp8/encoder/mips/msa/

temporal_filter_msa.c
    27   v4i32 acc0, acc1, acc2, acc3;   in temporal_filter_apply_16size_msa() local
    41   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_16size_msa()
    74   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_16size_msa()
    79   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_16size_msa()
   113   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_16size_msa()
   140   v4i32 acc0, acc1, acc2, acc3;   in temporal_filter_apply_8size_msa() local
   157   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_8size_msa()
   195   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_8size_msa()
   201   LD_SW2(acc, 4, acc0, acc1);   in temporal_filter_apply_8size_msa()
   235   ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,   in temporal_filter_apply_8size_msa()

/external/boringssl/src/crypto/cipher_extra/asm/

chacha20_poly1305_x86_64.pl
    85   my ($acc0,$acc1,$acc2)=map("%r$_",(10..12));
   133   adc 8+$src, $acc1
   144   mul $acc1
   158   mul $acc1
   171   mov $t1, $acc1
   180   adc $t1, $acc1
   183   adc $t3, $acc1
   378   xor $acc1, $acc1
   385   mov 5($adp), $acc1
   386   shr \$24, $acc1
   [all …]

/external/eigen/unsupported/Eigen/

MPRealSupport
   180   mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())),
   193   acc1 = 0;
   197   … mpfr_add(acc1.mpfr_ptr(), acc1.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
   200   … mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_srcptr(), alpha.mpfr_srcptr(), mpreal::get_default_rnd());
   201   … mpfr_add(res(i,j).mpfr_ptr(), res(i,j).mpfr_srcptr(), acc1.mpfr_srcptr(), mpreal::get_default_rnd…

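Note: the MPRealSupport GEBP kernel accumulates each output coefficient in an mpreal (MPFR) accumulator, multiplying into a temporary and adding into acc1 at the block's precision. A small sketch of that accumulate loop using the MPFR C API directly (the 256-bit precision and the function shape are arbitrary choices for illustration; out must already be initialized by the caller):

    #include <mpfr.h>

    /* Extended-precision dot product: acc1 += a[i] * b[i] at 256-bit precision. */
    static void dot_mpfr(mpfr_t out, mpfr_t a[], mpfr_t b[], int n) {
      mpfr_t acc1, tmp;
      mpfr_init2(acc1, 256);
      mpfr_init2(tmp, 256);
      mpfr_set_zero(acc1, +1);
      for (int i = 0; i < n; ++i) {
        mpfr_mul(tmp, a[i], b[i], MPFR_RNDN);   /* tmp = a[i] * b[i] */
        mpfr_add(acc1, acc1, tmp, MPFR_RNDN);   /* acc1 += tmp */
      }
      mpfr_set(out, acc1, MPFR_RNDN);
      mpfr_clears(acc1, tmp, (mpfr_ptr)0);
    }
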
/external/libxaac/decoder/

ixheaacd_aac_tns.c
   294   WORD32 acc1;   in ixheaacd_tns_ar_filter_fixed_non_neon_armv7() local
   300   acc1 = (WORD32)(acc >> 32);   in ixheaacd_tns_ar_filter_fixed_non_neon_armv7()
   302   y = ixheaacd_sub32_sat(y, ixheaacd_shl32_sat(acc1, 1));   in ixheaacd_tns_ar_filter_fixed_non_neon_armv7()
   344   WORD32 acc1;   in ixheaacd_tns_ar_filter_fixed_armv8() local
   350   acc1 = (WORD32)(acc >> 32);   in ixheaacd_tns_ar_filter_fixed_armv8()
   352   y = ixheaacd_sub32(y, ixheaacd_shl32_sat(acc1, 1));   in ixheaacd_tns_ar_filter_fixed_armv8()

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/

frame-20.ll
    81   %acc1 = fsub double %l1, %acc0
    82   %acc2 = fsub double %l2, %acc1
   113   store volatile double %acc1, double *%ptr
   210   %acc1 = fsub double %l1, %acc0
   211   %acc2 = fsub double %l2, %acc1
   241   store volatile double %acc1, double *%ptr
   312   %acc1 = fsub double %l1, %acc0
   313   %acc2 = fsub double %l2, %acc1
   337   store volatile double %acc1, double *%ptr
   397   %acc1 = fsub double %l1, %acc0
   [all …]

/external/llvm/test/CodeGen/SystemZ/

frame-20.ll
    81   %acc1 = fsub double %l1, %acc0
    82   %acc2 = fsub double %l2, %acc1
   113   store volatile double %acc1, double *%ptr
   210   %acc1 = fsub double %l1, %acc0
   211   %acc2 = fsub double %l2, %acc1
   241   store volatile double %acc1, double *%ptr
   312   %acc1 = fsub double %l1, %acc0
   313   %acc2 = fsub double %l2, %acc1
   337   store volatile double %acc1, double *%ptr
   397   %acc1 = fsub double %l1, %acc0
   [all …]

/external/sonivox/arm-wt-22k/lib_src/

eas_wtengine.c
   379   EAS_I32 acc1;   in WT_VoiceFilter() local
   406   acc1 = z1 * b1;   in WT_VoiceFilter()
   407   acc1 += z2 * b2;   in WT_VoiceFilter()
   408   acc0 = acc1 + k * acc0;   in WT_VoiceFilter()

/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/

fmacs.ll
    57   define void @t4(float %acc1, float %a, float %b, float %acc2, float %c, float* %P1, float* %P2) {
    74   %1 = fadd float %acc1, %0

/external/llvm/test/Transforms/SLPVectorizer/X86/

crash_cmpop.ll
    12   %acc1.056 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
    19   store float %acc1.056, float* %arrayidx2, align 4

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/

crash_cmpop.ll
    12   %acc1.056 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
    19   store float %acc1.056, float* %arrayidx2, align 4

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/

fmacs.ll
    57   define void @t4(float %acc1, float %a, float %b, float %acc2, float %c, float* %P1, float* %P2) {
    74   %1 = fadd float %acc1, %0