Lines matching refs:vn, all in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8():

52    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);   [local]
53    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
54    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
56    vn = _mm256_sub_ps(vn, vmagic_bias);
59    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
61    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

88    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);   [local]
89    __m256 ven = _mm256_andnot_ps(vindex_mask, vn);
90    const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
92    vn = _mm256_sub_ps(vn, vmagic_bias);
95    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
97    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
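The matched statements are the range-reduction step of exp(z): vn = round(z*log2(e)*4)/4 is obtained via the magic-bias trick, the two lowest mantissa bits of vn index a 4-entry table of 2^(j/4) (the _mm256_permutevar_ps lookup), the remaining bits (ven) carry the integer exponent, and vt = z - vn*ln(2) is computed with the two-step ("rr2") hi/lo split of ln(2). The scalar C sketch below mirrors those steps; the constants, table values, and the plain Taylor polynomial for exp(vt) are illustrative assumptions for this sketch, not necessarily the kernel's exact parameters.

/*
 * Scalar sketch of the range reduction shown in the matched lines.
 * Assumptions: the constants and table below and the Taylor polynomial at the
 * end are chosen for illustration; valid for z in roughly [-87, 0].
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float exp_sketch(float z) {
  const float log2e        = 0x1.715476p+0f;   /* log2(e) */
  const float magic_bias   = 0x1.800000p+21f;  /* 1.5*2^21: forces ulp(vn) == 2^-2 */
  const float minus_ln2_hi = -0x1.62E400p-1f;  /* hi part of ln(2), trailing bits zero */
  const float minus_ln2_lo = -0x1.7F7D1Cp-20f; /* lo part: ln(2) - hi */
  /* 4-entry table of 2^(j/4), j = 0..3 (the role of vtable / _mm256_permutevar_ps). */
  const float table[4] = {
    0x1.000000p+0f, 0x1.306FE0p+0f, 0x1.6A09E6p+0f, 0x1.AE89FAp+0f,
  };

  /* vn = z*log2(e) + magic_bias: float rounding snaps z*log2(e) to the nearest
   * multiple of 1/4, because every representable value near 1.5*2^21 has
   * ulp = 2^-2. */
  float vn = z * log2e + magic_bias;

  uint32_t vn_bits;
  memcpy(&vn_bits, &vn, sizeof vn_bits);

  /* The two lowest mantissa bits of vn are the table index (what vindex_mask
   * isolates and the permute looks up in the vector code). */
  const float vl = table[vn_bits & 3];

  /* Clearing the index bits and shifting left by 21 aligns the integer part of
   * n with the float exponent field; adding it into vl's bit pattern scales vl
   * by 2^floor(n).  This is the "ven" path of the kernel. */
  const uint32_t ven = (vn_bits & ~UINT32_C(3)) << 21;
  uint32_t vs_bits;
  memcpy(&vs_bits, &vl, sizeof vs_bits);
  vs_bits += ven;
  float vs;  /* vs == 2^n, with n = round(z*log2(e)*4)/4 */
  memcpy(&vs, &vs_bits, sizeof vs);

  vn -= magic_bias;  /* vn now holds n itself */

  /* Two-step ("rr2") Cody-Waite reduction: vt = z - n*ln(2).  The hi part of
   * ln(2) has enough trailing zero bits that vn*minus_ln2_hi is exact for the
   * n range used here. */
  float vt = vn * minus_ln2_hi + z;
  vt = vn * minus_ln2_lo + vt;

  /* Plain degree-4 Taylor polynomial for exp(vt) on |vt| <= ln(2)/8
   * (the kernel uses tuned coefficients instead). */
  float vp = 1.0f / 24.0f;
  vp = vp * vt + 1.0f / 6.0f;
  vp = vp * vt + 0.5f;
  vp = vp * vt + 1.0f;
  vp = vp * vt + 1.0f;

  return vs * vp;  /* ~= exp(z) */
}

int main(void) {
  /* Quick comparison against the libm reference on a few negative inputs. */
  const float inputs[] = { -0.01f, -0.5f, -1.0f, -5.0f, -20.0f, -80.0f };
  for (size_t i = 0; i < sizeof inputs / sizeof inputs[0]; i++) {
    const float z = inputs[i];
    printf("z=%+8.3f  sketch=%.8e  expf=%.8e\n", z, exp_sketch(z), expf(z));
  }
  return 0;
}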