/third_party/openssl/crypto/ec/asm/

ecp_nistz256-armv8.pl  (D)
    50:   $acc0,$acc1,$acc2,$acc3,$acc4,$acc5) =
    230:  ldp $acc2,$acc3,[$ap,#16]
    252:  ldp $acc2,$acc3,[$ap,#16]
    273:  ldp $acc2,$acc3,[$ap,#16]
    278:  mov $t2,$acc2
    298:  ldp $acc2,$acc3,[$ap,#16]
    303:  mov $t2,$acc2
    307:  mov $a2,$acc2
    335:  ldp $acc2,$acc3,[$ap,#16]
    358:  mov $acc2,xzr
    [all …]

ecp_nistz256-ppc64.pl  (D)
    45:   my ($rp,$ap,$bp,$bi,$acc0,$acc1,$acc2,$acc3,$poly1,$poly3,
    215:  ld $acc2,16($ap)
    252:  ld $acc2,16($ap)
    287:  ld $acc2,16($ap)
    292:  mr $t2,$acc2
    327:  ld $acc2,16($ap)
    334:  mr $t2,$acc2
    335:  std $acc2,80($sp)
    379:  ld $acc2,16($ap)
    415:  li $acc2,0
    [all …]

ecp_nistz256-x86_64.pl  (D)
    492:  my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
    546:  mov %rdx, $acc2
    549:  add %rax, $acc2
    571:  sub $acc0, $acc2
    579:  adc %rdx, $acc2
    602:  add $t1, $acc2
    604:  add %rax, $acc2
    640:  add $t0, $acc2
    642:  add %rax, $acc2
    661:  add %rax, $acc2
    [all …]

ecp_nistz256-sparcv9.pl  (D)
    1603: my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
    1613: addxccc $acc2,$acc2,$acc2
    1631: addxccc $t2,$acc2,$acc2
    1639: addxccc $acc2,$minus1,$t2
    1646: movrz $acc4,$t2,$acc2
    1649: stx $acc2,[$rp+16]
    1684: srlx $acc2,32,$acc4
    1687: subccc $acc2,$t0,$acc2
    1690: and $acc2,$poly1,$acc2
    1695: or $acc2,$acc4,$acc2
    [all …]

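The sparcv9 hits show the reduction half of the pattern: double with addxccc, subtract the modulus with borrow, then select the reduced or unreduced value branch-free (movrz). A generic 4-limb sketch of that conditional subtraction in C, assuming GCC/Clang's unsigned __int128 and a value that still fits in four limbs; the perlasm hard-codes the P-256 prime rather than looping:

    #include <stdint.h>

    /* Branch-free "subtract p, keep the non-negative option" step.
     * Illustrative only -- not the OpenSSL code. */
    static void mod_sub_select(uint64_t acc[4], const uint64_t p[4])
    {
        uint64_t t[4], borrow = 0;
        for (int i = 0; i < 4; i++) {
            unsigned __int128 d = (unsigned __int128)acc[i] - p[i] - borrow;
            t[i] = (uint64_t)d;
            borrow = (uint64_t)(d >> 64) & 1;   /* 1 iff the subtract wrapped */
        }
        uint64_t keep = (uint64_t)0 - borrow;   /* all-ones if acc < p */
        for (int i = 0; i < 4; i++)
            acc[i] = (t[i] & ~keep) | (acc[i] & keep);
    }
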
x25519-x86_64.pl  (D)
    485:  my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7) = map("%r$_",(8..15));
    537:  mulx $acc6,$acc2,%rax    # a[0]*b[2]
    538:  adcx %rbx,$acc2
    547:  adcx %rbx,$acc2
    549:  adox %rax,$acc2
    561:  adcx %rax,$acc2
    627:  mulx %rbp,$acc2,%rbx     # a[0]*a[2]
    628:  adcx %rax,$acc2
    652:  adcx $acc2,$acc2
    656:  adox %rax,$acc2
    [all …]

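Across these five files the acc0..acc7 registers play one role: they hold the columns of a multi-limb product while carries ripple through them (add/adc on x86_64, the adcx/adox dual carry chains in x25519, addxccc on SPARC, ldp/mov shuffling on armv8). A minimal portable-C sketch of that column-accumulation (Comba) idea, assuming GCC/Clang's unsigned __int128; the real perlasm also interleaves the modular reduction and keeps every accumulator in a register:

    #include <stdint.h>

    /* 256x256 -> 512-bit schoolbook multiply over 64-bit little-endian
     * limbs, summing each diagonal into a wide accumulator the way the
     * acc registers do above. */
    static void mul_4x4(uint64_t r[8], const uint64_t a[4], const uint64_t b[4])
    {
        unsigned __int128 acc = 0;
        for (int k = 0; k < 8; k++) {
            uint64_t hi = 0;                        /* overflow past 128 bits */
            for (int i = 0; i < 4; i++) {
                int j = k - i;
                if (j < 0 || j > 3) continue;
                unsigned __int128 p = (unsigned __int128)a[i] * b[j];
                acc += p;
                if (acc < p) hi++;                  /* 128-bit add wrapped */
            }
            r[k] = (uint64_t)acc;                   /* emit this column's limb */
            acc = (acc >> 64) | ((unsigned __int128)hi << 64);
        }
    }
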
/third_party/cmsis/CMSIS/DSP/Source/MatrixFunctions/

arm_mat_cmplx_mult_f32.c  (D) — all hits in arm_mat_cmplx_mult_f32_3x3_mve():
    147:  f32x4_t acc0, acc1, acc2;                                   (local)
    175:  acc2 = vcmulq(vecA, vecB);
    176:  acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    190:  acc2 = vcmlaq(acc2, vecA, vecB);
    191:  acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    198:  pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 0] = acc2[0] + acc2[2];
    199:  pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 1] = acc2[1] + acc2[3];
    218:  acc2 = vcmulq(vecA, vecB);
    219:  acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    232:  acc2 = vcmlaq(acc2, vecA, vecB);
    [all …]

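vcmulq plus vcmlaq_rot90 is a full complex multiply split across two MVE instructions; vcmlaq (rotation 0) plus vcmlaq_rot90 is the accumulating form used in the inner product at lines 190/232. A scalar model of what one [real, imag] lane pair computes, per my reading of the ACLE semantics (not CMSIS code):

    #include <complex.h>

    /* acc += a * b, expanded the way the two VCMLA rotations split it:
     *   rot 0  : acc.re += a.re*b.re;  acc.im += a.re*b.im;
     *   rot 90 : acc.re -= a.im*b.im;  acc.im += a.im*b.re; */
    static float complex cmla_model(float complex acc,
                                    float complex a, float complex b)
    {
        float re = crealf(acc) + crealf(a)*crealf(b) - cimagf(a)*cimagf(b);
        float im = cimagf(acc) + crealf(a)*cimagf(b) + cimagf(a)*crealf(b);
        return re + im * I;
    }

The acc2[0] + acc2[2] / acc2[1] + acc2[3] stores then fold the two complex lanes of the vector accumulator into a single result.
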
arm_mat_mult_q31.c  (D) — all hits in arm_mat_mult_q31_3x3_mve():
    138:  q63_t acc0, acc1, acc2;                                     (local)
    155:  acc2 = vrmlaldavhq(vecA, vecB);
    159:  acc2 = asrl(acc2, 23);
    163:  pOut[2 * MATRIX_DIM3] = (q31_t) acc2;
    176:  acc2 = vrmlaldavhq(vecA, vecB);
    180:  acc2 = asrl(acc2, 23);
    184:  pOut[2 * MATRIX_DIM3] = (q31_t) acc2;
    197:  acc2 = vrmlaldavhq(vecA, vecB);
    201:  acc2 = asrl(acc2, 23);
    205:  pOut[2 * MATRIX_DIM3] = (q31_t) acc2;
    [all …]

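The shift bookkeeping: a Q31xQ31 product is Q62, so the dot product needs a total arithmetic shift of 31 to land back in Q31. vrmlaldavhq returns the high 64 bits of its 72-bit internal sum (an implicit rounded >>8, as I read the MVE spec), which is why only asrl(acc2, 23) remains. A portable model, ignoring that rounding:

    #include <stdint.h>
    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* Q31 dot product for one 3-element row/column pair. */
    static q31_t q31_dot3(const q31_t *a, const q31_t *b)
    {
        q63_t acc = 0;
        for (int i = 0; i < 3; i++)
            acc += (q63_t)a[i] * b[i];   /* Q62 accumulation */
        return (q31_t)(acc >> 31);       /* 8 bits in vrmlaldavhq + 23 in asrl */
    }
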
arm_mat_cmplx_mult_q31.c  (D)
  in arm_mat_cmplx_mult_q31_2x2_mve():
    78:   q63_t acc0, acc1, acc2, acc3;                               (local)
    96:   acc2 = vmlsldavq_s32(vecA, vecB);
    101:  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc2, 31);
    114:  acc2 = vmlsldavq_s32(vecA, vecB);
    121:  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc2, 31);
  in arm_mat_cmplx_mult_q31_3x3_mve():
    141:  q63_t acc0, acc1, acc2, acc3;                               (local)
    170:  acc2 = vmlsldavq_s32(vecA, vecB);
    178:  acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    183:  pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc2, 31);
    211:  acc2 = vmlsldavq_s32(vecA, vecB);
    [all …]

arm_mat_cmplx_mult_f16.c  (D) — all hits in arm_mat_cmplx_mult_f16_3x3_mve():
    151:  f16x8_t acc0, acc1, acc2;                                   (local)
    179:  acc2 = vcmulq(vecA2, vecB);
    180:  acc2 = vcmlaq_rot90(acc2, vecA2, vecB);
    184:  mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    199:  acc2 = vcmulq(vecA2, vecB);
    200:  acc2 = vcmlaq_rot90(acc2, vecA2, vecB);
    204:  mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    219:  acc2 = vcmulq(vecA2, vecB);
    220:  acc2 = vcmlaq_rot90(acc2, vecA2, vecB);
    224:  mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    [all …]

arm_mat_mult_q15.c  (D) — all hits in arm_mat_mult_q15_3x3_mve():
    136:  q63_t acc0, acc1, acc2;                                     (local)
    153:  acc2 = vmlaldavq(vecA2, vecB);
    157:  acc2 = asrl(acc2, 15);
    161:  pOut[2 * MATRIX_DIM3] = (q15_t) __SSAT(acc2, 16);
    171:  acc2 = vmlaldavq(vecA2, vecB);
    175:  acc2 = asrl(acc2, 15);
    179:  pOut[2 * MATRIX_DIM3] = (q15_t) __SSAT(acc2, 16);
    189:  acc2 = vmlaldavq(vecA2, vecB);
    193:  acc2 = asrl(acc2, 15);
    197:  pOut[2 * MATRIX_DIM3] = (q15_t) __SSAT(acc2, 16);
    [all …]

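Same idea in Q15: products are Q30, asrl(acc2, 15) rescales to Q15, and the extra __SSAT clamps the result because a row sum can exceed the Q15 range. A portable model of the pipeline:

    #include <stdint.h>
    typedef int16_t q15_t;
    typedef int64_t q63_t;

    /* Q15 dot product; the clamp models __SSAT(x, 16). */
    static q15_t q15_dot3(const q15_t *a, const q15_t *b)
    {
        q63_t acc = 0;
        for (int i = 0; i < 3; i++)
            acc += (q63_t)a[i] * b[i];   /* Q30 accumulation */
        acc >>= 15;                      /* back to Q15 */
        if (acc >  32767) acc =  32767;  /* saturate to 16 bits */
        if (acc < -32768) acc = -32768;
        return (q15_t)acc;
    }
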
/third_party/openssl/crypto/bn/asm/

armv8-mont.pl  (D)
    663:  my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("x$_",(19..26));
    714:  mov $acc2,xzr
    761:  adcs $acc2,$acc2,$t1
    775:  adds $acc2,$acc2,$t3       // t[2]+lo(a[1]*a[0])
    802:  stp $acc2,$acc3,[$tp],#8*2 // t[2..3]
    828:  adc $acc2,xzr,xzr          // t[10]
    837:  adc $acc2,$acc2,$t3
    846:  adcs $acc2,$acc2,$t3
    854:  adcs $acc2,$acc2,$t2
    861:  adcs $acc2,$acc2,$t1
    [all …]

ppc-mont.pl  (D)
    360:  $acc0,$acc1,$acc2,$acc3,$acc4,
    434:  li $acc2,0
    464:  adde $acc2,$acc2,$t2
    474:  adde $acc2,$acc2,$t1
    494:  adde $acc1,$acc2,$t2
    496:  adde $acc2,$acc3,$t3
    502:  adde $acc2,$acc2,$t2
    534:  adde $acc2,$acc2,$t2
    542:  adde $acc2,$acc2,$t1
    552:  adde $acc2,$acc2,$t2
    [all …]

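Both mont files unroll the same word-serial Montgomery loop: accumulate a*b[i] into t, fold in m*n so the low word cancels, shift t down one word, repeat, then conditionally subtract n. A compact C sketch of that CIOS loop — illustrative, not OpenSSL's code — assuming 64-bit little-endian limbs, odd n, a precomputed n0 = -n[0]^-1 mod 2^64, GCC/Clang's unsigned __int128, and a C99 VLA; the perlasm is fully unrolled and constant-time:

    #include <stdint.h>

    /* r = a * b * 2^(-64*s) mod n, where s is the limb count. */
    static void mont_mul(uint64_t *r, const uint64_t *a, const uint64_t *b,
                         const uint64_t *n, uint64_t n0, int s)
    {
        uint64_t t[s + 2];
        for (int i = 0; i < s + 2; i++) t[i] = 0;

        for (int i = 0; i < s; i++) {
            unsigned __int128 c = 0;
            for (int j = 0; j < s; j++) {        /* t += a * b[i] */
                c += (unsigned __int128)a[j] * b[i] + t[j];
                t[j] = (uint64_t)c;  c >>= 64;
            }
            c += t[s];
            t[s] = (uint64_t)c;  t[s + 1] = (uint64_t)(c >> 64);

            uint64_t m = t[0] * n0;              /* t[0] + m*n[0] == 0 mod 2^64 */
            c = ((unsigned __int128)m * n[0] + t[0]) >> 64;
            for (int j = 1; j < s; j++) {        /* t = (t + m*n) / 2^64 */
                c += (unsigned __int128)m * n[j] + t[j];
                t[j - 1] = (uint64_t)c;  c >>= 64;
            }
            c += t[s];
            t[s - 1] = (uint64_t)c;
            t[s]     = t[s + 1] + (uint64_t)(c >> 64);
        }

        uint64_t borrow = 0;                     /* final t - n */
        for (int i = 0; i < s; i++) {
            unsigned __int128 d = (unsigned __int128)t[i] - n[i] - borrow;
            r[i] = (uint64_t)d;  borrow = (uint64_t)(d >> 64) & 1;
        }
        if (borrow > t[s])                       /* t < n: keep t instead */
            for (int i = 0; i < s; i++) r[i] = t[i];
    }
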
/third_party/openssl/crypto/aes/asm/

aes-sparcv9.pl  (D)
    48:   $acc2="%o1";
    224:  srl $s2,5,$acc2 !
    228:  and $acc2,2040,$acc2
    232:  ldx [$tbl+$acc2],$acc2 !
    281:  srlx $acc2,16,$acc2 !
    285:  xor $acc2,$t0,$t0
    315:  srl $t2,5,$acc2
    319:  and $acc2,2040,$acc2
    324:  ldx [$tbl+$acc2],$acc2
    364:  srlx $acc2,16,$acc2
    [all …]

aes-parisc.pl  (D)
    67:   ($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
    138:  _srm $s0,8,$acc2
    141:  stb $acc2,2($out)
    150:  _srm $s2,8,$acc2
    157:  stb $acc2,10($out)
    215:  _srm $s2,8,$acc2
    220:  ldwx,s $acc2($tbl),$acc2
    251:  _ror $acc2,16,$acc2
    255:  xor $acc2,$t0,$t0
    283:  _srm $t2,8,$acc2
    [all …]

aes-x86_64.pl  (D)
    64:   $acc2="%ebp"; $mask1b="%rbp";
    107:  movzb `&lo("$s2")`,$acc2
    110:  mov 0($sbox,$acc2,8),$t2
    114:  movzb `&lo("$s3")`,$acc2
    117:  mov 0($sbox,$acc2,8),$t3
    121:  movzb `&hi("$s0")`,$acc2
    124:  xor 3($sbox,$acc2,8),$t3
    132:  movzb `&lo("$s0")`,$acc2
    135:  xor 2($sbox,$acc2,8),$t2
    139:  movzb `&lo("$s1")`,$acc2
    [all …]

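In all three AES files the acc registers hold byte indices: each state byte is extracted, scaled to a table offset, and the fetched table words are XORed together. A sketch of one output column in the usual big-endian T-table formulation (table contents omitted; the sparcv9 code instead packs table entries eight bytes apart, which is what its srl ...,5 / and ...,2040 offset math computes):

    #include <stdint.h>

    /* One column of an AES encryption round: four byte extractions,
     * four table lookups, four XORs plus the round key. */
    static uint32_t aes_round_column(uint32_t s0, uint32_t s1,
                                     uint32_t s2, uint32_t s3,
                                     const uint32_t Te[4][256],
                                     uint32_t round_key_word)
    {
        return Te[0][(s0 >> 24) & 0xff]
             ^ Te[1][(s1 >> 16) & 0xff]
             ^ Te[2][(s2 >>  8) & 0xff]
             ^ Te[3][ s3        & 0xff]
             ^ round_key_word;
    }
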
/third_party/cmsis/CMSIS/DSP/Source/FilteringFunctions/

arm_fir_q31.c  (D)
  in arm_fir_q31_1_4_mve():
    235:  q63_t acc0=0, acc1=0, acc2=0, acc3=0;                       (local)
    260:  acc2 = vrmlaldavhq(vecIn0, vecCoeffs);
    267:  acc2 = asrl(acc2, 23);
    272:  *pOutput++ = (q31_t) acc2;
    301:  acc2 = vrmlaldavhq(vecIn0, vecCoeffs);
    305:  acc2 = asrl(acc2, 23);
    309:  *pOutput++ = (q31_t) acc2;
  in arm_fir_q31_29_32_mve():
    465:  q63_t acc0, acc1, acc2, acc3;                               (local)
    543:  acc2 = vrmlaldavhq(vecIn0, vecCoeffs0);
    545:  acc2 = vrmlaldavhaq(acc2, vecIn0, vecCoeffs1);
    [all …]

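acc0..acc3 exist because each pass computes four output samples against the same coefficient stream, so every coefficient load is amortized four ways; the _29_32 variant additionally splits longer filters across two coefficient vectors (vecCoeffs0/vecCoeffs1). A plain-C model of the four-accumulator inner loop with the same total >>31 scaling as the vrmlaldavhq/asrl pair; it assumes h[] is stored so the loop is a straight dot product, and leaves overflow headroom to the caller as the fast CMSIS variants do:

    #include <stdint.h>
    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* Four consecutive Q31 FIR outputs per call; x points at the
     * oldest sample the block needs. */
    static void fir_q31_4(q31_t *out, const q31_t *x,
                          const q31_t *h, int ntaps)
    {
        q63_t acc0 = 0, acc1 = 0, acc2 = 0, acc3 = 0;
        for (int k = 0; k < ntaps; k++) {
            q63_t c = h[k];               /* one coefficient, four samples */
            acc0 += c * x[k + 0];
            acc1 += c * x[k + 1];
            acc2 += c * x[k + 2];
            acc3 += c * x[k + 3];
        }
        out[0] = (q31_t)(acc0 >> 31);
        out[1] = (q31_t)(acc1 >> 31);
        out[2] = (q31_t)(acc2 >> 31);
        out[3] = (q31_t)(acc3 >> 31);
    }
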
arm_fir_interpolate_q31.c  (D) — all hits in arm_fir_interpolate_q31():
    117:  q63_t acc2 = 0LL;                                           (local)
    132:  acc2 = vrmlaldavhaq(acc2, vecState, vecCoef);
    155:  acc2 = vrmlaldavhaq(acc2, vecState, vecCoef);
    163:  acc2 = asrl(acc2, 31 - 8);
    168:  *pDst++ = (q31_t) acc2;
    181:  q63_t acc2 = 0LL;                                           (local)
    195:  acc2 = vrmlaldavhaq(acc2, vecState, vecCoef);
    215:  acc2 = vrmlaldavhaq(acc2, vecState, vecCoef);
    220:  acc2 = asrl(acc2, 31 - 8);
    224:  *pDst++ = (q31_t) acc2;
    [all …]

arm_fir_interpolate_q15.c  (D) — all hits in arm_fir_interpolate_q15():
    120:  q63_t acc2 = 0LL;                                           (local)
    135:  acc2 = vmlaldavaq(acc2, vecState, vecCoef);
    158:  acc2 = vmlaldavaq(acc2, vecState, vecCoef);
    166:  acc2 = asrl(acc2, 15);
    171:  *pDst++ = (q15_t) __SSAT(acc2, 16);
    184:  q63_t acc2 = 0LL;                                           (local)
    198:  acc2 = vmlaldavaq(acc2, vecState, vecCoef);
    218:  acc2 = vmlaldavaq(acc2, vecState, vecCoef);
    223:  acc2 = asrl(acc2, 15);
    227:  *pDst++ = (q15_t) __SSAT(acc2, 16);;
    [all …]

arm_conv_fast_q15.c  (D) — all hits in arm_conv_fast_q15():
    73:   q31_t sum, acc0, acc1, acc2, acc3;      /* Accumulators */  (local)
    281:  acc2 = 0;
    314:  acc2 = __SMLADX(x2, c0, acc2);
    336:  acc2 = __SMLADX(x0, c0, acc2);
    368:  acc2 = __SMLADX(x1, c0, acc2);
    387:  acc2 = __SMLADX(x3, c0, acc2);
    405:  acc2 = __SMLADX(x3, c0, acc2);
    423:  acc2 = __SMLADX(x2, c0, acc2);
    430:  write_q15x2_ia (&pOut, __PKHBT((acc2 >> 15), (acc3 >> 15), 16));
    433:  write_q15x2_ia (&pOut, __PKHBT((acc3 >> 15), (acc2 >> 15), 16));

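__SMLADX performs two q15 multiplies and both adds in one instruction, with the halves of the second operand exchanged; the exchange is what makes it fit convolution, where one sequence is read time-reversed. A scalar model per my reading of the ARMv7E-M definition (not CMSIS code):

    #include <stdint.h>

    /* acc + x.lo*c.hi + x.hi*c.lo  (the X = exchanged-halves form;
     * plain __SMLAD pairs lo*lo + hi*hi instead, as in the
     * correlate variant below). */
    static int32_t smladx_model(uint32_t x, uint32_t c, int32_t acc)
    {
        int16_t xlo = (int16_t)(x & 0xffff), xhi = (int16_t)(x >> 16);
        int16_t clo = (int16_t)(c & 0xffff), chi = (int16_t)(c >> 16);
        return acc + (int32_t)xlo * chi + (int32_t)xhi * clo;
    }
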
arm_correlate_fast_q15.c  (D) — all hits in arm_correlate_fast_q15():
    73:   q31_t sum, acc0, acc1, acc2, acc3;      /* Accumulators */  (local)
    267:  acc2 = 0;
    300:  acc2 = __SMLAD(x2, c0, acc2);
    322:  acc2 = __SMLAD(x0, c0, acc2);
    354:  acc2 = __SMLADX(x1, c0, acc2);
    373:  acc2 = __SMLAD(x3, c0, acc2);
    391:  acc2 = __SMLAD(x3, c0, acc2);
    409:  acc2 = __SMLADX(x2, c0, acc2);
    421:  *pOut = (q15_t) (acc2 >> 15);

arm_fir_q15.c  (D) — all hits in arm_fir_q15():
    216:  q63_t acc0, acc1, acc2, acc3;                               (local)
    253:  acc2 = 0LL;
    278:  acc2 = vmlaldavaq(acc2, vecIn0, vecCoeffs);
    293:  *pOutput++ = (q15_t) MVE_ASRL_SAT16(acc2, 15);
    313:  acc2 = 0LL;
    337:  acc2 = vmlaldavaq(acc2, vecIn0, vecCoeffs);
    349:  acc2 = asrl(acc2, 15);
    353:  *pOutput++ = (q15_t) MVE_ASRL_SAT16(acc2, 15);
    475:  q63_t acc1, acc2, acc3;                 /* Accumulators */  (local)
    506:  acc2 = 0;
    [all …]

arm_fir_decimate_fast_q31.c  (D) — all hits in arm_fir_decimate_fast_q31():
    81:   q63_t acc1, acc2, acc3;                                     (local)
    108:  acc2 = 0;
    140:  acc2 = (q31_t) ((((q63_t) acc2 << 32) + ((q63_t) x2 * c0)) >> 32);
    155:  acc2 = (q31_t) ((((q63_t) acc2 << 32) + ((q63_t) x2 * c0)) >> 32);
    170:  acc2 = (q31_t) ((((q63_t) acc2 << 32) + ((q63_t) x2 * c0)) >> 32);
    185:  acc2 = (q31_t) ((((q63_t) acc2 << 32) + ((q63_t) x2 * c0)) >> 32);
    209:  acc2 = (q31_t) ((((q63_t) acc2 << 32) + ((q63_t) x2 * c0)) >> 32);
    223:  *pDst++ = (q31_t) (acc2 << 1);

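The repeated expression is the whole trick of the "fast" Q31 variants: keep only the high 32 bits of each 64-bit product so the running sum stays in a single 32-bit register. Widening acc2 back to 64 bits before the add preserves its value exactly while each product's low half is discarded; the sum lands in Q30, hence the final << 1 at line 223, at the cost of roughly one bit of precision per tap versus the non-fast version. Isolated as a helper:

    #include <stdint.h>
    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* acc += (x * c) >> 32 without keeping a 64-bit accumulator
     * across taps -- exactly the quoted source line. */
    static q31_t fast_q31_mac(q31_t acc, q31_t x, q31_t c)
    {
        return (q31_t)((((q63_t)acc << 32) + (q63_t)x * c) >> 32);
    }
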
arm_conv_partial_fast_q15.c  (D) — all hits in arm_conv_partial_fast_q15():
    68:   q31_t sum, acc0, acc1, acc2, acc3;      /* Accumulator */   (local)
    303:  acc2 = 0;
    338:  acc2 = __SMLADX(x2, c0, acc2);
    360:  acc2 = __SMLADX(x0, c0, acc2);
    391:  acc2 = __SMLADX(x1, c0, acc2);
    410:  acc2 = __SMLADX(x3, c0, acc2);
    428:  acc2 = __SMLADX(x3, c0, acc2);
    445:  acc2 = __SMLADX(x2, c0, acc2);
    452:  write_q15x2_ia (&pOut, __PKHBT(acc2 >> 15, acc3 >> 15, 16));
    455:  write_q15x2_ia (&pOut, __PKHBT(acc3 >> 15, acc2 >> 15, 16));

arm_fir_q7.c  (D) — all hits in arm_fir_q7():
    207:  q31_t acc0, acc1, acc2, acc3;                               (local)
    263:  acc2 = 0;
    290:  acc2 = vmladavaq(acc2, vecIn0, vecCoeffs);
    307:  *pOutput++ = (q7_t) __SSAT((acc2 >> 7U), 8);
    327:  acc2 = 0;
    347:  acc2 = vmladavaq(acc2, vecIn0, vecCoeffs);
    356:  *pOutput++ = (q7_t) __SSAT((acc2 >> 7U), 8);
    463:  q31_t acc1, acc2, acc3;                 /* Accumulators */  (local)
    494:  acc2 = 0;
    529:  acc2 += ((q15_t) x2 * c0);
    [all …]

/third_party/mbedtls/library/

poly1305.c  (D) — all hits in poly1305_process():
    100:  uint32_t acc0, acc1, acc2, acc3, acc4;                      (local)
    117:  acc2 = ctx->acc[2];
    133:  d2 += (uint64_t) acc2 + ( d1 >> 32U );
    137:  acc2 = (uint32_t) d2;
    144:  mul64( acc2, rs2 ) +
    148:  mul64( acc2, rs3 ) +
    153:  mul64( acc2, r0 ) +
    158:  mul64( acc2, r1 ) +
    169:  acc2 = (uint32_t) d2;
    178:  d0 = (uint64_t) acc2 + ( d0 >> 32U );
    [all …]

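Poly1305's 130-bit accumulator lives in the five 32-bit words acc0..acc4, with 64-bit temporaries (d0..d2 in the hits) carrying between limbs. A sketch of just the add-message-block step in that style — the multiply by r and the mod 2^130-5 reduction inside poly1305_process() follow the same limb/carry pattern but are longer, so this is illustrative rather than mbedtls's exact code:

    #include <stdint.h>

    /* acc += m + pad*2^128, over five 32-bit limbs with 64-bit carries.
     * pad_bit is 1 for a full 16-byte block (the Poly1305 padding bit). */
    static void poly1305_add_block(uint32_t acc[5], const uint32_t m[4],
                                   uint32_t pad_bit)
    {
        uint64_t d;
        d = (uint64_t)acc[0] + m[0];              acc[0] = (uint32_t)d;
        d = (uint64_t)acc[1] + m[1] + (d >> 32);  acc[1] = (uint32_t)d;
        d = (uint64_t)acc[2] + m[2] + (d >> 32);  acc[2] = (uint32_t)d;
        d = (uint64_t)acc[3] + m[3] + (d >> 32);  acc[3] = (uint32_t)d;
        acc[4] += (uint32_t)(d >> 32) + pad_bit;
    }
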