Home
last modified time | relevance | path

Searched refs:acc1 (Results 1 – 25 of 92) sorted by relevance

1234

/third_party/cmsis/CMSIS/DSP/Source/FilteringFunctions/
Darm_biquad_cascade_df2T_f64.c148 float64_t acc1; /* Accumulator */ in arm_biquad_cascade_df2T_f64() local
184 acc1 = b0 * Xn1 + d1; in arm_biquad_cascade_df2T_f64()
187 d1 += a1 * acc1; in arm_biquad_cascade_df2T_f64()
190 d2 += a2 * acc1; in arm_biquad_cascade_df2T_f64()
192 *pOut++ = acc1; in arm_biquad_cascade_df2T_f64()
198 acc1 = b0 * Xn1 + d1; in arm_biquad_cascade_df2T_f64()
201 d1 += a1 * acc1; in arm_biquad_cascade_df2T_f64()
204 d2 += a2 * acc1; in arm_biquad_cascade_df2T_f64()
206 *pOut++ = acc1; in arm_biquad_cascade_df2T_f64()
211 acc1 = b0 * Xn1 + d1; in arm_biquad_cascade_df2T_f64()
[all …]
Darm_biquad_cascade_df2T_f16.c59 float16_t acc0, acc1; in arm_biquad_cascade_df2T_f16() local
134 acc1 = vgetq_lane(state, 1); in arm_biquad_cascade_df2T_f16()
135 state = vfmaq(state, a1Coeffs, acc1); in arm_biquad_cascade_df2T_f16()
146 *pOut++ = acc1; in arm_biquad_cascade_df2T_f16()
200 _Float16 acc1; /* Accumulator */ in arm_biquad_cascade_df2T_f16() local
235 acc1 = b0 * Xn1 + d1; in arm_biquad_cascade_df2T_f16()
238 d1 += a1 * acc1; in arm_biquad_cascade_df2T_f16()
241 d2 += a2 * acc1; in arm_biquad_cascade_df2T_f16()
243 *pOut++ = acc1; in arm_biquad_cascade_df2T_f16()
248 acc1 = b0 * Xn1 + d1; in arm_biquad_cascade_df2T_f16()
[all …]
Darm_biquad_cascade_df2T_f32.c59 float32_t acc0, acc1; in arm_biquad_cascade_df2T_f32() local
129 acc1 = vgetq_lane(state, 1); in arm_biquad_cascade_df2T_f32()
130 state = vfmaq(state, a1Coeffs, acc1); in arm_biquad_cascade_df2T_f32()
141 *pOut++ = acc1; in arm_biquad_cascade_df2T_f32()
195 float32_t acc1; /* accumulator */ in arm_biquad_cascade_df2T_f32() local
315 acc1 = (b0 * Xn1) + d1; in arm_biquad_cascade_df2T_f32()
318 *pOut++ = acc1; in arm_biquad_cascade_df2T_f32()
322 d1 = ((b1 * Xn1) + (a1 * acc1)) + d2; in arm_biquad_cascade_df2T_f32()
325 d2 = (b2 * Xn1) + (a2 * acc1); in arm_biquad_cascade_df2T_f32()
357 float32_t acc1; /* Accumulator */ in arm_biquad_cascade_df2T_f32() local
[all …]
Darm_fir_q31.c235 q63_t acc0=0, acc1=0, acc2=0, acc3=0; in arm_fir_q31_1_4_mve() local
257 acc1 = vrmlaldavhq(vecIn0, vecCoeffs); in arm_fir_q31_1_4_mve()
266 acc1 = asrl(acc1, 23); in arm_fir_q31_1_4_mve()
271 *pOutput++ = (q31_t) acc1; in arm_fir_q31_1_4_mve()
298 acc1 = vrmlaldavhq(vecIn0, vecCoeffs); in arm_fir_q31_1_4_mve()
304 acc1 = asrl(acc1, 23); in arm_fir_q31_1_4_mve()
308 *pOutput++ = (q31_t) acc1; in arm_fir_q31_1_4_mve()
326 acc1 = vrmlaldavhq(vecIn0, vecCoeffs); in arm_fir_q31_1_4_mve()
329 acc1 = asrl(acc1, 23); in arm_fir_q31_1_4_mve()
332 *pOutput++ = (q31_t) acc1; in arm_fir_q31_1_4_mve()
[all …]
Darm_fir_interpolate_q31.c116 q63_t acc1 = 0LL; in arm_fir_interpolate_q31() local
129 acc1 = vrmlaldavhaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q31()
152 acc1 = vrmlaldavhaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q31()
162 acc1 = asrl(acc1, 31 - 8); in arm_fir_interpolate_q31()
167 *pDst++ = (q31_t) acc1; in arm_fir_interpolate_q31()
180 q63_t acc1 = 0LL; in arm_fir_interpolate_q31() local
192 acc1 = vrmlaldavhaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q31()
212 acc1 = vrmlaldavhaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q31()
219 acc1 = asrl(acc1, 31 - 8); in arm_fir_interpolate_q31()
223 *pDst++ = (q31_t) acc1; in arm_fir_interpolate_q31()
[all …]
Darm_fir_interpolate_q15.c119 q63_t acc1 = 0LL; in arm_fir_interpolate_q15() local
132 acc1 = vmlaldavaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q15()
155 acc1 = vmlaldavaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q15()
165 acc1 = asrl(acc1, 15); in arm_fir_interpolate_q15()
170 *pDst++ = (q15_t) __SSAT(acc1, 16); in arm_fir_interpolate_q15()
183 q63_t acc1 = 0LL; in arm_fir_interpolate_q15() local
195 acc1 = vmlaldavaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q15()
215 acc1 = vmlaldavaq(acc1, vecState, vecCoef); in arm_fir_interpolate_q15()
222 acc1 = asrl(acc1, 15); in arm_fir_interpolate_q15()
226				*pDst++ = (q15_t) __SSAT(acc1, 16); in arm_fir_interpolate_q15()
[all …]
Darm_conv_q15.c117 int64_t acc1 = 0LL; in arm_conv_q15() local
122 MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count); in arm_conv_q15()
124 *pDst++ = (q15_t) acc1; in arm_conv_q15()
144 int64_t acc1 = 0LL; in arm_conv_q15() local
155 MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, count); in arm_conv_q15()
157 *pDst++ = (q15_t) acc1; in arm_conv_q15()
167 int64_t acc1 = 0LL; in arm_conv_q15() local
176 MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, count); in arm_conv_q15()
178 *pDst++ = (q15_t) acc1; in arm_conv_q15()
199 int64_t acc1 = 0LL; in arm_conv_q15() local
[all …]
Darm_correlate_q15.c137 int64_t acc1 = 0LL; in arm_correlate_q15() local
146 MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count); in arm_correlate_q15()
150 *pDst = (q15_t) acc1; in arm_correlate_q15()
171 int64_t acc1 = 0LL; in arm_correlate_q15() local
182 MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, srcBLen); in arm_correlate_q15()
186 *pDst = (q15_t) acc1; in arm_correlate_q15()
198 int64_t acc1 = 0LL; in arm_correlate_q15() local
207 MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, srcBLen); in arm_correlate_q15()
211 *pDst = (q15_t) acc1; in arm_correlate_q15()
234 int64_t acc1 = 0LL; in arm_correlate_q15() local
[all …]
/third_party/cmsis/CMSIS/DSP/PrivateInclude/
Darm_vec_filtering.h38 #define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_F32(acc0, acc1, acc2, acc3, pX, pY, count)\ argument
87 acc1 = vecAddAcrossF32Mve(acc1Vec); \
127 #define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count)\ argument
168 acc1 = vecAddAcrossF32Mve(acc1Vec); \
171 #define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_F32(acc0, acc1, pX, pY, count)\ argument
210 acc1 = vecAddAcrossF32Mve(acc1Vec); \
213 #define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F32(acc0, acc1, pX, pY, count)\ argument
253 acc1 = vecAddAcrossF32Mve(acc1Vec); \
256 #define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count) … argument
289	acc1 = vecAddAcrossF32Mve(acc1Vec); \
[all …]
/third_party/openssl/crypto/ec/asm/
Decp_nistz256-armv8.pl50 $acc0,$acc1,$acc2,$acc3,$acc4,$acc5) =
228 ldp $acc0,$acc1,[$ap]
251 ldp $acc0,$acc1,[$ap]
272 ldp $acc0,$acc1,[$ap]
277 mov $t1,$acc1
297 ldp $acc0,$acc1,[$ap]
302 mov $t1,$acc1
306 mov $a1,$acc1
334 ldp $acc0,$acc1,[$ap]
357 mov $acc1,xzr
[all …]
Decp_nistz256-ppc64.pl45 my ($rp,$ap,$bp,$bi,$acc0,$acc1,$acc2,$acc3,$poly1,$poly3,
213 ld $acc1,8($ap)
251 ld $acc1,8($ap)
286 ld $acc1,8($ap)
291 mr $t1,$acc1
326 ld $acc1,8($ap)
332 mr $t1,$acc1
333 std $acc1,72($sp)
378 ld $acc1,8($ap)
414 li $acc1,0
[all …]
Decp_nistz256-x86_64.pl492 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
540 mov %rdx, $acc1
543 add %rax, $acc1
575 add $t0, $acc1
577 add %rax, $acc1
596 add %rax, $acc1
616 mov $acc1, $t0
617 imulq %r15, $acc1
625 mov $acc1, %rax
631 mov $acc1, $t1
[all …]
/third_party/cmsis/CMSIS/DSP/Source/MatrixFunctions/
Darm_mat_cmplx_mult_f32.c82 f32x4_t acc0, acc1; in arm_mat_cmplx_mult_f32_2x2_mve() local
100 acc1 = vcmulq(vecA, vecB); in arm_mat_cmplx_mult_f32_2x2_mve()
101 acc1 = vcmlaq_rot90(acc1, vecA, vecB); in arm_mat_cmplx_mult_f32_2x2_mve()
105 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc1[0] + acc1[2]; in arm_mat_cmplx_mult_f32_2x2_mve()
106 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc1[1] + acc1[3]; in arm_mat_cmplx_mult_f32_2x2_mve()
121 acc1 = vcmulq(vecA, vecB); in arm_mat_cmplx_mult_f32_2x2_mve()
122 acc1 = vcmlaq_rot90(acc1, vecA, vecB); in arm_mat_cmplx_mult_f32_2x2_mve()
126 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc1[0] + acc1[2]; in arm_mat_cmplx_mult_f32_2x2_mve()
127 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc1[1] + acc1[3]; in arm_mat_cmplx_mult_f32_2x2_mve()
147 f32x4_t acc0, acc1, acc2; in arm_mat_cmplx_mult_f32_3x3_mve() local
[all …]
Darm_mat_cmplx_mult_q31.c78 q63_t acc0, acc1, acc2, acc3; in arm_mat_cmplx_mult_q31_2x2_mve() local
93 acc1 = vmlaldavxq_s32(vecA, vecB); in arm_mat_cmplx_mult_q31_2x2_mve()
100 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
111 acc1 = vmlaldavxq_s32(vecA, vecB); in arm_mat_cmplx_mult_q31_2x2_mve()
120 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
141 q63_t acc0, acc1, acc2, acc3; in arm_mat_cmplx_mult_q31_3x3_mve() local
167 acc1 = vmlaldavxq_s32(vecA, vecB); in arm_mat_cmplx_mult_q31_3x3_mve()
175 acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1); in arm_mat_cmplx_mult_q31_3x3_mve()
182 pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31); in arm_mat_cmplx_mult_q31_3x3_mve()
188 acc1 = vmlaldavxq_s32(vecA, vecB); in arm_mat_cmplx_mult_q31_3x3_mve()
[all …]
Darm_mat_mult_q31.c78 q63_t acc0, acc1; in arm_mat_mult_q31_2x2_mve() local
96 acc1 = vrmlaldavhq(vecA1, vecB); in arm_mat_mult_q31_2x2_mve()
99 acc1 = asrl(acc1, 23); in arm_mat_mult_q31_2x2_mve()
102 pOut[1 * MATRIX_DIM2] = (q31_t) acc1; in arm_mat_mult_q31_2x2_mve()
111 acc1 = vrmlaldavhq(vecA1, vecB); in arm_mat_mult_q31_2x2_mve()
114 acc1 = asrl(acc1, 23); in arm_mat_mult_q31_2x2_mve()
117 pOut[1 * MATRIX_DIM2] = (q31_t) acc1; in arm_mat_mult_q31_2x2_mve()
138 q63_t acc0, acc1, acc2; in arm_mat_mult_q31_3x3_mve() local
153 acc1 = vrmlaldavhq(vecA, vecB); in arm_mat_mult_q31_3x3_mve()
158 acc1 = asrl(acc1, 23); in arm_mat_mult_q31_3x3_mve()
[all …]
Darm_mat_mult_q15.c79 q63_t acc0, acc1; in arm_mat_mult_q15_2x2_mve() local
93 acc1 = vmlaldavq(vecA1, vecB); in arm_mat_mult_q15_2x2_mve()
96 acc1 = asrl(acc1, 15); in arm_mat_mult_q15_2x2_mve()
99 pOut[1 * MATRIX_DIM2] = (q15_t) __SSAT(acc1, 16); in arm_mat_mult_q15_2x2_mve()
108 acc1 = vmlaldavq(vecA1, vecB); in arm_mat_mult_q15_2x2_mve()
111 acc1 = asrl(acc1, 15); in arm_mat_mult_q15_2x2_mve()
114 pOut[1 * MATRIX_DIM2] = (q15_t) __SSAT(acc1, 16); in arm_mat_mult_q15_2x2_mve()
136 q63_t acc0, acc1, acc2; in arm_mat_mult_q15_3x3_mve() local
152 acc1 = vmlaldavq(vecA1, vecB); in arm_mat_mult_q15_3x3_mve()
156 acc1 = asrl(acc1, 15); in arm_mat_mult_q15_3x3_mve()
[all …]
Darm_mat_cmplx_mult_f16.c76 f16x8_t acc0, acc1; in arm_mat_cmplx_mult_f16_2x2_mve() local
107 acc1 = vcmulq(vecA1, vecB); in arm_mat_cmplx_mult_f16_2x2_mve()
108 acc1 = vcmlaq_rot90(acc1, vecA1, vecB); in arm_mat_cmplx_mult_f16_2x2_mve()
124 vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc1); in arm_mat_cmplx_mult_f16_2x2_mve()
125 vecTmp = vaddq(vecTmp, acc1); in arm_mat_cmplx_mult_f16_2x2_mve()
151 f16x8_t acc0, acc1, acc2; in arm_mat_cmplx_mult_f16_3x3_mve() local
176 acc1 = vcmulq(vecA1, vecB); in arm_mat_cmplx_mult_f16_3x3_mve()
177 acc1 = vcmlaq_rot90(acc1, vecA1, vecB); in arm_mat_cmplx_mult_f16_3x3_mve()
183 mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]); in arm_mat_cmplx_mult_f16_3x3_mve()
196 acc1 = vcmulq(vecA1, vecB); in arm_mat_cmplx_mult_f16_3x3_mve()
[all …]
/third_party/openssl/crypto/bn/asm/
Darmv8-mont.pl663 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("x$_",(19..26));
713 mov $acc1,xzr
759 adds $acc1,$acc1,$t0 // t[1]+lo(a[1]*a[0])
773 stp $acc0,$acc1,[$tp],#8*2 // t[0..1]
803 adc $acc1,xzr,xzr // t[9]
814 adc $acc1,$acc1,$t1
825 adcs $acc1,$acc1,$t2
835 adcs $acc1,$acc1,$t2
844 adcs $acc1,$acc1,$t2
852 adcs $acc1,$acc1,$t1
[all …]
Dsparcv9-mont.pl69 $acc1="%o4";
133 mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
146 add $acc1,$car1,$car1
154 mov $tmp1,$acc1
164 add $acc1,$car1,$car1
204 mulx $npj,$mul1,$acc1 !prologue!
217 add $acc1,$car1,$car1
227 mov $tmp1,$acc1
239 add $acc1,$car1,$car1
319 mulx $npj,$mul1,$acc1 !prologue!
[all …]
Dppc-mont.pl360 $acc0,$acc1,$acc2,$acc3,$acc4,
432 li $acc1,0
462 adde $acc1,$acc1,$t1
471 addc $acc1,$acc1,$t0
492 adde $acc0,$acc1,$t1
494 adde $acc1,$acc2,$t2
501 adde $acc1,$acc1,$t1
532 adde $acc1,$acc1,$t1
540 addc $acc1,$acc1,$t0
550 adde $acc1,$acc1,$t1
[all …]
/third_party/cmsis/CMSIS/DSP/Source/ComplexMathFunctions/
Darm_cmplx_mag_squared_q31.c65 q31_t acc0, acc1; /* Accumulators */ in arm_cmplx_mag_squared_q31() local
97 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_squared_q31()
100 *pDst++ = acc0 + acc1; in arm_cmplx_mag_squared_q31()
115 q31_t acc0, acc1; /* Accumulators */ in arm_cmplx_mag_squared_q31() local
129 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_squared_q31()
131 *pDst++ = acc0 + acc1; in arm_cmplx_mag_squared_q31()
136 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_squared_q31()
137 *pDst++ = acc0 + acc1; in arm_cmplx_mag_squared_q31()
142 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_squared_q31()
143 *pDst++ = acc0 + acc1; in arm_cmplx_mag_squared_q31()
[all …]
Darm_cmplx_mag_q31.c68 q31_t acc0, acc1; /* Accumulators */ in arm_cmplx_mag_q31() local
111 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_q31()
114 arm_sqrt_q31(acc0 + acc1, pDst++); in arm_cmplx_mag_q31()
129 q31_t acc0, acc1; /* Accumulators */ in arm_cmplx_mag_q31() local
143 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_q31()
146 arm_sqrt_q31(acc0 + acc1, pDst++); in arm_cmplx_mag_q31()
151 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_q31()
152 arm_sqrt_q31(acc0 + acc1, pDst++); in arm_cmplx_mag_q31()
157 acc1 = (q31_t) (((q63_t) imag * imag) >> 33); in arm_cmplx_mag_q31()
158 arm_sqrt_q31(acc0 + acc1, pDst++); in arm_cmplx_mag_q31()
[all …]
/third_party/openssl/crypto/aes/asm/
Daes-sparcv9.pl47 $acc1="%o0";
215 srl $s1,13,$acc1 !
225 and $acc1,2040,$acc1
229 ldx [$tbl+$acc1],$acc1
277 srlx $acc1,8,$acc1
282 xor $acc1,$t0,$t0
311 srl $t1,13,$acc1
316 and $acc1,2040,$acc1
320 ldx [$tbl+$acc1],$acc1
360 srlx $acc1,8,$acc1
[all …]
Daes-x86_64.pl63 $acc1="%edi"; $maskfe="%rdi";
106 movzb `&lo("$s1")`,$acc1
109 mov 0($sbox,$acc1,8),$t1
113 movzb `&hi("$s2")`,$acc1
116 xor 3($sbox,$acc1,8),$t1
131 movzb `&lo("$s3")`,$acc1
134 xor 2($sbox,$acc1,8),$t1
138 movzb `&hi("$s0")`,$acc1
141 xor 1($sbox,$acc1,8),$t1
145 movzb `&hi("$s1")`,$acc1
[all …]
Daes-parisc.pl67 ($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
136 _srm $s0,16,$acc1
139 stb $acc1,1($out)
148 _srm $s2,16,$acc1
155 stb $acc1,9($out)
208 _srm $s1,16,$acc1
218 ldwx,s $acc1($tbl),$acc1
248 _ror $acc1,8,$acc1
252 xor $acc1,$t0,$t0
280 _srm $t1,16,$acc1
[all …]

1234