// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/neonfma-nr1rsqrts1fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

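// Vectorized single-precision square root, y[i] = sqrt(x[i]), for NEON with
// FMA. Starts from the hardware reciprocal-square-root estimate (VRSQRTE),
// refines it with one Newton-Raphson step via VRSQRTS and one via FMA, then
// applies a final FMA-based adjustment (the "nr1rsqrts1fma1adj" in the name).
// The main loop consumes 20 floats per iteration (the "x20" suffix); any
// remainder is handled by a scalar sqrtf loop.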
void xnn_f32_vsqrt_ukernel__neonfma_nr1rsqrts1fma1adj_x20(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const float32x4_t vhalf = vmovq_n_f32(0.5f);
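  // Main loop: process 20 elements (five 128-bit vectors of 4 floats) per iteration.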
  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    const float32x4_t vxGHIJ = vld1q_f32(x); x += 4;

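    // Low-precision initial estimates r ~= 1/sqrt(x) from VRSQRTE.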
    float32x4_t vrsqrtx0123 = vrsqrteq_f32(vx0123);
    float32x4_t vrsqrtx4567 = vrsqrteq_f32(vx4567);
    float32x4_t vrsqrtx89AB = vrsqrteq_f32(vx89AB);
    float32x4_t vrsqrtxCDEF = vrsqrteq_f32(vxCDEF);
    float32x4_t vrsqrtxGHIJ = vrsqrteq_f32(vxGHIJ);

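    // First Newton-Raphson step, using VRSQRTS: square the estimate r, form
    // the correction factor (3 - x*r*r)/2 with VRSQRTS, and fold it back into r.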
    const float32x4_t vrx0123 = vmulq_f32(vrsqrtx0123, vrsqrtx0123);
    const float32x4_t vrx4567 = vmulq_f32(vrsqrtx4567, vrsqrtx4567);
    const float32x4_t vrx89AB = vmulq_f32(vrsqrtx89AB, vrsqrtx89AB);
    const float32x4_t vrxCDEF = vmulq_f32(vrsqrtxCDEF, vrsqrtxCDEF);
    const float32x4_t vrxGHIJ = vmulq_f32(vrsqrtxGHIJ, vrsqrtxGHIJ);

    const float32x4_t vcorrection0123 = vrsqrtsq_f32(vx0123, vrx0123);
    const float32x4_t vcorrection4567 = vrsqrtsq_f32(vx4567, vrx4567);
    const float32x4_t vcorrection89AB = vrsqrtsq_f32(vx89AB, vrx89AB);
    const float32x4_t vcorrectionCDEF = vrsqrtsq_f32(vxCDEF, vrxCDEF);
    const float32x4_t vcorrectionGHIJ = vrsqrtsq_f32(vxGHIJ, vrxGHIJ);

    vrsqrtx0123 = vmulq_f32(vrsqrtx0123, vcorrection0123);
    vrsqrtx4567 = vmulq_f32(vrsqrtx4567, vcorrection4567);
    vrsqrtx89AB = vmulq_f32(vrsqrtx89AB, vcorrection89AB);
    vrsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vcorrectionCDEF);
    vrsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vcorrectionGHIJ);

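    // Derive sqrtx ~= sqrt(x) = r*x and halfrsqrtx ~= 1/(2*sqrt(x)) = r/2;
    // the two are refined together below.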
    float32x4_t vsqrtx0123 = vmulq_f32(vrsqrtx0123, vx0123);
    float32x4_t vhalfrsqrtx0123 = vmulq_f32(vrsqrtx0123, vhalf);
    float32x4_t vsqrtx4567 = vmulq_f32(vrsqrtx4567, vx4567);
    float32x4_t vhalfrsqrtx4567 = vmulq_f32(vrsqrtx4567, vhalf);
    float32x4_t vsqrtx89AB = vmulq_f32(vrsqrtx89AB, vx89AB);
    float32x4_t vhalfrsqrtx89AB = vmulq_f32(vrsqrtx89AB, vhalf);
    float32x4_t vsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vxCDEF);
    float32x4_t vhalfrsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vhalf);
    float32x4_t vsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vxGHIJ);
    float32x4_t vhalfrsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vhalf);

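    // Residual 0.5 - sqrtx*halfrsqrtx = 0.5*(1 - x*r*r), computed in a single
    // fused multiply-subtract.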
    const float32x4_t vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    const float32x4_t vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    const float32x4_t vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);
    const float32x4_t vresidualCDEF = vfmsq_f32(vhalf, vsqrtxCDEF, vhalfrsqrtxCDEF);
    const float32x4_t vresidualGHIJ = vfmsq_f32(vhalf, vsqrtxGHIJ, vhalfrsqrtxGHIJ);

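    // Second Newton-Raphson step, using FMA: scale both sqrtx and halfrsqrtx
    // by (1 + residual) in a coupled (Goldschmidt-style) update.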
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);
    vhalfrsqrtxCDEF = vfmaq_f32(vhalfrsqrtxCDEF, vresidualCDEF, vhalfrsqrtxCDEF);
    vsqrtxCDEF = vfmaq_f32(vsqrtxCDEF, vresidualCDEF, vsqrtxCDEF);
    vhalfrsqrtxGHIJ = vfmaq_f32(vhalfrsqrtxGHIJ, vresidualGHIJ, vhalfrsqrtxGHIJ);
    vsqrtxGHIJ = vfmaq_f32(vsqrtxGHIJ, vresidualGHIJ, vsqrtxGHIJ);

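    // Final adjustment: compute the remaining error x - sqrtx*sqrtx with one
    // fused multiply-subtract.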
    const float32x4_t vadjustment0123 = vfmsq_f32(vx0123, vsqrtx0123, vsqrtx0123);
    const float32x4_t vadjustment4567 = vfmsq_f32(vx4567, vsqrtx4567, vsqrtx4567);
    const float32x4_t vadjustment89AB = vfmsq_f32(vx89AB, vsqrtx89AB, vsqrtx89AB);
    const float32x4_t vadjustmentCDEF = vfmsq_f32(vxCDEF, vsqrtxCDEF, vsqrtxCDEF);
    const float32x4_t vadjustmentGHIJ = vfmsq_f32(vxGHIJ, vsqrtxGHIJ, vsqrtxGHIJ);

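    // One last Newton step on the square root itself:
    // y = sqrtx + adjustment * halfrsqrtx, i.e. s + (x - s*s)/(2*sqrt(x)).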
    const float32x4_t vy0123 = vfmaq_f32(vsqrtx0123, vhalfrsqrtx0123, vadjustment0123);
    const float32x4_t vy4567 = vfmaq_f32(vsqrtx4567, vhalfrsqrtx4567, vadjustment4567);
    const float32x4_t vy89AB = vfmaq_f32(vsqrtx89AB, vhalfrsqrtx89AB, vadjustment89AB);
    const float32x4_t vyCDEF = vfmaq_f32(vsqrtxCDEF, vhalfrsqrtxCDEF, vadjustmentCDEF);
    const float32x4_t vyGHIJ = vfmaq_f32(vsqrtxGHIJ, vhalfrsqrtxGHIJ, vadjustmentGHIJ);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
    vst1q_f32(y, vyGHIJ); y += 4;
  }
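  // Scalar tail: process the remaining 1 to 19 elements one at a time.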
  if XNN_UNLIKELY(n != 0) {
    do {
      const float vx = *x++;
      const float vy = sqrtf(vx);
      *y++ = vy;
      n -= sizeof(float);
    } while (n != 0);
  }
}
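// A minimal usage sketch (hypothetical; XNNPACK normally reaches this
// micro-kernel through its operator API rather than by direct call). Note
// that `params` is never referenced in this kernel body, so a
// zero-initialized struct suffices, and `n` is a byte count that must be a
// non-zero multiple of sizeof(float):
//
//   union xnn_f32_sqrt_params params = { 0 };
//   xnn_f32_vsqrt_ukernel__neonfma_nr1rsqrts1fma1adj_x20(
//       count * sizeof(float), input, output, &params);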