/external/XNNPACK/src/f32-vscaleextexp/gen/

D | avx512f-p5-scalef-x16.c | 48 | const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16() (local)
D | avx512f-p5-scalef-x32.c | 49 | const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() (local)
D | avx512f-p5-scalef-x48.c | 50 | const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48() (local)
D | avx512f-p5-scalef-x64.c | 51 | const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() (local)
D | avx2-p5-x8.c | 54 | …const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FRO… | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x8() (local)
D | avx2-p5-x16.c | 55 | …const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FRO… | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16() (local)
D | avx2-p5-x24.c | 56 | …const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FRO… | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x24() (local)
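
Every row in this group is the same step of the exp evaluation: the input is scaled by log2(e) and rounded to the nearest integer, giving the exponent n of the range reduction x = n * ln(2) + r. A minimal sketch of both variants follows; the vlog2e value is the standard log2(e) constant and is an assumption here, as is the completion of the AVX2 rounding flags, which the listing truncates (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC is the conventional pairing).

    #include <immintrin.h>

    /* Sketch of the rounding step in the rows above. The vlog2e value is the
     * standard log2(e) constant, assumed here rather than read from the files. */
    static inline __m512 round_n_avx512(__m512 vx0) {
      const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
      /* imm8 = 0: preserve 0 fraction bits and round to nearest even,
       * i.e. round to the nearest integer. */
      return _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    }

    static inline __m256 round_n_avx2(__m256 vx0) {
      const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
      /* The listing cuts the flags off; _MM_FROUND_NO_EXC is the conventional
       * second flag (an inference): round to nearest, no exceptions raised. */
      return _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e),
                             _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }
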
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/

D | avx512f-p5-scalef-x16.c | 50 | __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16() (local)
D | avx512f-p5-scalef-x32.c | 52 | __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() (local)
D | avx512f-p5-scalef-x48.c | 54 | __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48() (local)
D | avx512f-p5-scalef-x64.c | 56 | __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() (local)
D | avx2-p5-x8.c | 54 | __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8() (local)
D | avx2-p5-x16.c | 56 | __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() (local)
D | avx2-p5-x24.c | 58 | __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x24() (local)
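
The AVX512F rows here reuse the roundscale reduction above, but the AVX2 rows round differently: a single FMA adds a large magic bias so the hardware rounds vx0 * log2(e) to an integer held in the low mantissa bits, and subtracting the bias back recovers n as a float. This is why these AVX2 kernels carry a vmagic_bias constant while the AVX512F ones do not. A sketch assuming the textbook bias of 1.5 * 2^23 (the generated kernels may use a slightly adjusted constant):

    #include <immintrin.h>

    /* Magic-bias rounding as in the AVX2 rows above. Constants are assumptions:
     * vlog2e is the standard log2(e); vmagic_bias is the textbook 1.5 * 2^23. */
    static inline __m256 round_n_magic_bias(__m256 vx0) {
      const __m256 vlog2e      = _mm256_set1_ps(0x1.715476p+0f);
      const __m256 vmagic_bias = _mm256_set1_ps(0x1.8p23f);
      /* One FMA both scales by log2(e) and rounds: adding 1.5 * 2^23 forces
       * the result to an integer stored in the low mantissa bits. */
      __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
      /* Subtract the bias to recover round(x * log2(e)) as a float. */
      return _mm256_sub_ps(vn0, vmagic_bias);
    }
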
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/

D | scalar-p5-x2.c | 57 | float vn0 = vx0 * vlog2e + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() (local)
D | scalar-p5-x2-acc2.c | 58 | float vn0 = vx0 * vlog2e + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() (local)
D | scalar-p5-x4.c | 61 | float vn0 = vx0 * vlog2e + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4() (local)
D | scalar-p5-x4-acc2.c | 62 | float vn0 = vx0 * vlog2e + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc2() (local)
D | scalar-lut64-p2-x2.c | 60 | float vn0 = vx0 * vlog2e_x64 + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() (local)
D | scalar-lut64-p2-x2-acc2.c | 61 | float vn0 = vx0 * vlog2e_x64 + vmagic_bias; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() (local)
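
The scalar kernels apply the same magic-bias rounding without SIMD. The p5 rows round n directly; the lut64-p2 rows multiply by vlog2e_x64, i.e. log2(e) pre-scaled by 64, so the rounded value carries 64 * n + k and its low 6 bits k can index a 64-entry table of 2^(k/64). A scalar sketch under the same assumed constants (the generated files may adjust the bias for the LUT variants):

    #include <stdint.h>
    #include <string.h>

    /* Scalar sketch of the p5 rows above, with the same assumed constants. */
    static inline float round_n_scalar(float vx0) {
      const float vlog2e = 0x1.715476p+0f;
      const float vmagic_bias = 0x1.8p23f;         /* textbook 1.5 * 2^23 */
      const float vn0 = vx0 * vlog2e + vmagic_bias;  /* rounds via the mantissa */
      return vn0 - vmagic_bias;                      /* n as a float */
    }

    /* lut64-p2 variant: the multiplier is pre-scaled by 64, so after the magic
     * bias the low bits of the float's representation hold the table index. */
    static inline uint32_t lut64_index(float vx0) {
      const float vlog2e_x64 = 0x1.715476p+6f;  /* 64 * log2(e), an assumption */
      const float vmagic_bias = 0x1.8p23f;
      const float vn0 = vx0 * vlog2e_x64 + vmagic_bias;
      uint32_t vbits;
      memcpy(&vbits, &vn0, sizeof(vbits));      /* reinterpret float as bits */
      return vbits & UINT32_C(0x3F);            /* low 6 bits index 2^(k/64) */
    }
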
/external/XNNPACK/src/f32-sigmoid/gen/

D | scalar-p5-div-x2.c | 64 | float vn0 = vz0 * vminus_log2e + vmagic_bias; | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2() (local)
D | scalar-p5-div-x4.c | 68 | float vn0 = vz0 * vminus_log2e + vmagic_bias; | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() (local)
D | scalar-lut64-p2-div-x2.c | 66 | float vn0 = vz0 * vminus_log2e_x64 + vmagic_bias; | in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2() (local)
D | scalar-lut2048-p1-div-x2.c | 66 | float vn0 = vz0 * vminus_log2e_x2048 + vmagic_bias; | in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2() (local)
D | avx2-rr1-p5-div-x16.c | 64 | __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); | in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16() (local)
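
The sigmoid kernels evaluate sigmoid(x) = 1 / (1 + exp(-z)) and fold the negation into the multiplier: vminus_log2e is -log2(e), pre-scaled by 64 or 2048 in the lut64 and lut2048 table variants, so the identical magic-bias line yields n = round(-z * log2(e)). A scalar sketch with assumed (negated) constants:

    /* Sketch of the scalar sigmoid rows above: the sign of the exponent is
     * folded into the multiplier. Constant values are assumptions. */
    static inline float sigmoid_round_n(float vz0) {
      const float vminus_log2e = -0x1.715476p+0f;  /* -log2(e) */
      const float vmagic_bias  = 0x1.8p23f;        /* textbook 1.5 * 2^23 */
      const float vn0 = vz0 * vminus_log2e + vmagic_bias;
      return vn0 - vmagic_bias;  /* n = round(-z * log2(e)) as a float */
    }
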