Lines Matching refs:vtwo

39   const __m256 vtwo = _mm256_set1_ps(2.0f);  in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()  local
229 vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
230 vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
231 vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
232 vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
233 vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
234 vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
235 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
236 vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
237 vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
238 vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
239 vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
240 vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
241 vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
242 vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
243 vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
244 vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
245 vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
246 vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
247 vr9 = _mm256_mul_ps(vr9, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr9, vd9))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
248 vr9 = _mm256_mul_ps(vr9, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr9, vd9))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
322 vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
323 vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
361 vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
362 vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd))); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80()
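All matches fall inside xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x80(): one local definition of vtwo (line 39) and the paired Newton-Raphson refinement steps that use it. These are the two refinement iterations named by the "nr2" suffix. For f(r) = 1/r - d, Newton's method gives the update r <- r - f(r)/f'(r) = r * (2 - r*d), which roughly doubles the number of correct mantissa bits per step; two steps take an initial ~12-bit reciprocal estimate (presumably from _mm256_rcp_ps, whose call is not among the matched lines) close to full single precision. A minimal standalone sketch of the same update, not XNNPACK code (the function name rcp_nr2 and the test value are illustrative):

    #include <immintrin.h>
    #include <stdio.h>

    /* Sketch: approximate 1/d with two Newton-Raphson steps,
       the same r = r * (2 - r*d) update as the matched lines.
       Assumes an AVX-capable target (compile with -mavx). */
    static __m256 rcp_nr2(__m256 vd) {
      const __m256 vtwo = _mm256_set1_ps(2.0f);
      __m256 vr = _mm256_rcp_ps(vd);  /* ~12-bit estimate of 1/d */
      /* Each step roughly doubles the number of correct bits. */
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      return vr;
    }

    int main(void) {
      float out[8];
      _mm256_storeu_ps(out, rcp_nr2(_mm256_set1_ps(3.0f)));
      printf("1/3 ~= %.9f\n", out[0]);  /* ~0.333333333 */
      return 0;
    }

In the kernel itself this two-step update is unrolled across ten register pairs (vr0/vd0 through vr9/vd9), i.e. 80 floats per main-loop iteration as the x80 suffix indicates, while the unsuffixed vr/vd lines at 322-323 and 361-362 handle the shorter remainder paths.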