/external/XNNPACK/src/f32-sigmoid/gen/ |
D | avx2-rr1-p5-nr2fma-x32.c |
    139  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32() local
    144  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
    149  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
    155  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x32()
|
D | avx2-rr1-p5-nr2fma-x40.c |
    152  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40() local
    158  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
    164  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
    171  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x40()
|
D | avx2-rr1-p5-nr2fma-x48.c |
    165  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48() local
    172  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
    179  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
    187  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x48()
|
D | avx2-rr1-p5-nr2fma-x56.c |
    178  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56() local
    186  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
    194  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
    203  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x56()
|
D | avx2-rr1-p5-nr2fma-x64.c |
    191  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64() local
    200  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64()
    209  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64()
    219  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x64()
|
D | avx2-rr1-p5-nr2fma-x72.c |
    204  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72() local
    214  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72()
    224  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72()
    235  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72()
|
D | avx2-rr1-p5-nr1fma-x32.c |
    139  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32() local
    144  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32()
    151  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x32()
|
D | avx2-rr1-p5-nr2fma-x80.c |
    217  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80() local
    228  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80()
    239  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80()
    251  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80()
|
D | avx2-rr1-p5-nr1fma-x40.c |
    152  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40() local
    158  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40()
    166  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x40()
|
D | avx2-rr1-p5-nr1fma-x48.c |
    165  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x48() local
    172  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x48()
    181  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x48()
|
D | avx2-rr1-p5-nr1fma-x56.c |
    178  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x56() local
    186  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x56()
    196  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x56()
|
D | avx2-rr1-p5-nr1fma-x64.c |
    191  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x64() local
    200  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x64()
    211  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x64()
|
D | avx2-rr1-p5-nr1fma-x72.c |
    204  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72() local
    214  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72()
    226  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72()
|
D | avx2-rr1-p5-nr1fma-x80.c |
    217  __m256 vr3 = _mm256_rcp_ps(vd3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80() local
    228  vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80()
    241  __m256 vf3 = _mm256_mul_ps(ve3, vr3);  in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80()
|
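The pattern repeated in all of the XNNPACK kernels above is a Newton-Raphson refinement of the AVX reciprocal estimate: the sigmoid is formed as e / d with d = e + 1, the division is replaced by _mm256_rcp_ps() plus one (nr1fma) or two (nr2fma) FMA-based correction steps, and the refined reciprocal is multiplied back onto e. A minimal standalone sketch of that refinement follows; it is not the XNNPACK source, and the function names are illustrative only.

#include <immintrin.h>

/* One Newton-Raphson step for a reciprocal: _mm256_rcp_ps() returns roughly
 * 12 accurate bits of 1/d, and each step r' = r + r*(1 - r*d) roughly
 * doubles that.  The update maps onto exactly the two FMAs seen above. */
static inline __m256 rcp_nr_step(__m256 vr, __m256 vd) {
  const __m256 vone = _mm256_set1_ps(1.0f);
  /* _mm256_fnmadd_ps(vr, vd, vone) computes the residual 1 - vr*vd */
  return _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
}

/* vf = ve / vd computed as ve * (1/vd): one step for the nr1fma variants,
 * two for the nr2fma variants.  Build with -mavx2 -mfma. */
static inline __m256 div_via_rcp(__m256 ve, __m256 vd, int nr_steps) {
  __m256 vr = _mm256_rcp_ps(vd);
  for (int i = 0; i < nr_steps; i++) {
    vr = rcp_nr_step(vr, vd);
  }
  return _mm256_mul_ps(ve, vr);
}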
/external/aac/libFDK/src/ |
D | fft.cpp |
    740   FIXP_DBL vr3, ur3;  in fft_16() local
    763   vr3 = (x[2] >> 1) + (x[18] >> 1); /* Re A + Re B */  in fft_16()
    798   x[16] = vr3 + (vi SHIFT_B); /* Re A' = ReA + ReB +ReC + ReD */  in fft_16()
    800   x[20] = vr3 - (vi SHIFT_B); /* Re C' = -(ReC+ReD) + (ReA+ReB) */  in fft_16()
    802   vr3 -= x[18]; /* Re A - Re B */  in fft_16()
    806   x[18] = ui + vr3; /* Re B' = Im C - Im D + Re A - Re B */  in fft_16()
    816   x[22] = vr3 - ui; /* Re D' = -Im C + Im D + Re A - Re B */  in fft_16()
    1017  FIXP_DBL vr3, ur3;  in fft_32() local
    1040  vr3 = (x[2] + x[34]) >> 1; /* Re A + Re B */  in fft_32()
    1080  x[32] = vr3 + (vi2 SHIFT_B); /* Re A' = ReA + ReB +ReC + ReD */  in fft_32()
    [all …]
|
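These fft.cpp hits are the real-part accumulators of a fixed-point radix-4 butterfly: vr3 first holds ReA + ReB (pre-shifted so the four-term sums cannot overflow), is combined with the ReC + ReD sum for the A'/C' outputs, and is then reused as ReA - ReB for the B'/D' outputs. The following is a rough scalar sketch of that combination, assuming plain Q31 int32_t in place of FIXP_DBL and ignoring the SHIFT_B scaling macro used in the real code.

#include <stdint.h>

typedef int32_t fixp_t;  /* stand-in for FIXP_DBL (Q31) */

/* reA..reD are the real parts of the four butterfly inputs; diff_im_cd is
 * the ImC - ImD term that the B'/D' outputs mix in.  The >>1 pre-scaling
 * mirrors the overflow guard visible in the snippets above. */
static void radix4_real_parts(fixp_t reA, fixp_t reB, fixp_t reC, fixp_t reD,
                              fixp_t diff_im_cd, fixp_t out[4]) {
  fixp_t vr = (reA >> 1) + (reB >> 1);   /* Re A + Re B (halved) */
  fixp_t vi = (reC >> 1) + (reD >> 1);   /* Re C + Re D (halved) */

  out[0] = vr + vi;            /* Re A' = ReA + ReB + ReC + ReD     */
  out[2] = vr - vi;            /* Re C' = (ReA + ReB) - (ReC + ReD) */

  vr -= reB >> 1;              /* vr is now Re A - Re B (halved)    */
  out[1] = diff_im_cd + vr;    /* Re B' = (ImC - ImD) + (ReA - ReB) */
  out[3] = vr - diff_im_cd;    /* Re D' = (ImD - ImC) + (ReA - ReB) */
}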
/external/elfutils/tests/ |
D | run-allregs.sh |
    1147  1127: vr3 (vr3), unsigned 128 bits
    2170  1127: vr3 (vr3), unsigned 128 bits
|
D | run-addrcfi.sh |
    1289  vector reg1127 (vr3): undefined
    2311  vector reg1127 (vr3): undefined
    3339  vector reg1127 (vr3): undefined
|
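The elfutils test expectations fix the DWARF numbering of the PowerPC AltiVec registers: run-allregs.sh lists vr3 as register 1127, an unsigned 128-bit register, and run-addrcfi.sh expects its CFI rule to be "undefined". A small sketch follows, assuming the vector set starts at 1124 for vr0 (which is what vr3 = 1127 implies); this is illustrative code, not part of elfutils.

#include <stdio.h>

/* Map a DWARF register number from the test output back to its AltiVec
 * name.  The base 1124 for vr0 is an assumption consistent with vr3
 * being reported as register 1127. */
static int altivec_reg_name(int dwarf_regno, char *buf, size_t len) {
  if (dwarf_regno < 1124 || dwarf_regno > 1124 + 31)
    return -1;                                    /* not vr0..vr31 */
  return snprintf(buf, len, "vr%d", dwarf_regno - 1124);
}

int main(void) {
  char name[8];
  if (altivec_reg_name(1127, name, sizeof name) > 0)
    printf("%s\n", name);                         /* prints: vr3 */
  return 0;
}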