Lines Matching refs: vmagic_bias

29 const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);  in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()  local
72 __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
73 __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
74 __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
75 __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
76 __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
77 __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
78 __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
79 __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
80 __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
81 __m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
97 vn0 = _mm256_sub_ps(vn0, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
98 vn1 = _mm256_sub_ps(vn1, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
99 vn2 = _mm256_sub_ps(vn2, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
100 vn3 = _mm256_sub_ps(vn3, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
101 vn4 = _mm256_sub_ps(vn4, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
102 vn5 = _mm256_sub_ps(vn5, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
103 vn6 = _mm256_sub_ps(vn6, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
104 vn7 = _mm256_sub_ps(vn7, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
105 vn8 = _mm256_sub_ps(vn8, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
106 vn9 = _mm256_sub_ps(vn9, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
250 __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
257 vn = _mm256_sub_ps(vn, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
300 __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
307 vn = _mm256_sub_ps(vn, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80()
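
Every reference above is part of one trick. vmagic_bias = 0x1.8000FEp23f equals 1.5*2^23 + 127: adding it to vx*vlog2e (lines 72-81, 250, 300) pushes the sum into a range where a float's unit in the last place is 1.0, so the FPU's round-to-nearest mode rounds vx*log2(e) to an integer n as a side effect of the fused multiply-add, and subtracting the bias back out (lines 97-106, 257, 307) recovers n as a float. The 0xFE suffix places 127 (the IEEE-754 single-precision exponent bias) in vn's low mantissa bits, which lets the kernel build 2^n with a single 23-bit left shift of vn's bit pattern; those shift lines don't reference vmagic_bias, so they don't appear in this match list. Below is a minimal scalar sketch of the trick, assuming round-to-nearest-even and C99 hex-float literals: a standalone illustration, not XNNPACK code, though the kernel performs the same steps with _mm256_fmadd_ps, _mm256_slli_epi32, and _mm256_sub_ps on whole vectors.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const float vmagic_bias = 0x1.8000FEp23f;  /* 1.5*2^23 + 127 */
  const float vlog2e = 0x1.715476p0f;        /* log2(e) */
  const float vx = -3.4f;

  /* Adding the large bias leaves a float whose ULP is 1.0, so the default
     round-to-nearest mode rounds vx*log2(e) to the nearest integer n.
     The low mantissa bits of vn now hold n + 127. */
  float vn = vx * vlog2e + vmagic_bias;

  /* Shift the low 9 bits (n + 127) into the sign+exponent field to
     reconstruct the float 2^n directly, with no int<->float conversion. */
  uint32_t bits;
  memcpy(&bits, &vn, sizeof bits);
  bits <<= 23;
  float vs;  /* vs = 2^n */
  memcpy(&vs, &bits, sizeof vs);

  /* Subtracting the bias back out recovers n itself as a float,
     mirroring the _mm256_sub_ps lines matched above. */
  vn -= vmagic_bias;

  printf("n = %.1f, 2^n = %g\n", (double) vn, (double) vs);
  return 0;
}

For vx = -3.4f this prints n = -5.0, 2^n = 0.03125. The payoff is that the whole exp range reduction stays in the floating-point domain: no cvt instructions, and both n and 2^n fall out of the same fused multiply-add.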