// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/neonfma-nr2fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

// Computes y[i] = sqrt(x[i]) over n bytes (n/sizeof(float) elements) of
// single-precision input.
//
// Method ("nr2fma1adj"): start from the hardware reciprocal-square-root
// estimate (VRSQRTE), refine with two FMA-based Newton-Raphson iterations,
// then apply one final FMA adjustment step to tighten the rounding of the
// result. The main loop processes 40 elements (10 NEON quads) per
// iteration; the remainder falls back to scalar sqrtf().
//
// `params` is part of the shared vsqrt microkernel signature; this variant
// does not read it.
//
// Note: vfmaq_f32(a, b, c) computes a + b*c and vfmsq_f32(a, b, c)
// computes a - b*c, each with a single rounding (fused).
void xnn_f32_vsqrt_ukernel__neonfma_nr2fma1adj_x40(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);  // n is a byte count: whole floats only

  const float32x4_t vhalf = vmovq_n_f32(0.5f);
  // Main loop: 40 floats (10 float32x4_t quads) per iteration.
  for (; n >= 40 * sizeof(float); n -= 40 * sizeof(float)) {
    // Load 10 quads of inputs.
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    const float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
    const float32x4_t vxKLMN = vld1q_f32(x); x += 4;
    const float32x4_t vxOPQR = vld1q_f32(x); x += 4;
    const float32x4_t vxSTUV = vld1q_f32(x); x += 4;
    const float32x4_t vxWXYZ = vld1q_f32(x); x += 4;
    const float32x4_t vx = vld1q_f32(x); x += 4;

    // Coarse hardware estimates of 1/sqrt(x) (VRSQRTE).
    const float32x4_t vrsqrtx0123 = vrsqrteq_f32(vx0123);
    const float32x4_t vrsqrtx4567 = vrsqrteq_f32(vx4567);
    const float32x4_t vrsqrtx89AB = vrsqrteq_f32(vx89AB);
    const float32x4_t vrsqrtxCDEF = vrsqrteq_f32(vxCDEF);
    const float32x4_t vrsqrtxGHIJ = vrsqrteq_f32(vxGHIJ);
    const float32x4_t vrsqrtxKLMN = vrsqrteq_f32(vxKLMN);
    const float32x4_t vrsqrtxOPQR = vrsqrteq_f32(vxOPQR);
    const float32x4_t vrsqrtxSTUV = vrsqrteq_f32(vxSTUV);
    const float32x4_t vrsqrtxWXYZ = vrsqrteq_f32(vxWXYZ);
    const float32x4_t vrsqrtx = vrsqrteq_f32(vx);

    // Form the two quantities the iteration refines in lockstep:
    //   sqrtx     ~ sqrt(x)       = x * rsqrt(x)
    //   halfrsqrtx ~ 0.5/sqrt(x)  = 0.5 * rsqrt(x)
    float32x4_t vsqrtx0123 = vmulq_f32(vrsqrtx0123, vx0123);
    float32x4_t vhalfrsqrtx0123 = vmulq_f32(vrsqrtx0123, vhalf);
    float32x4_t vsqrtx4567 = vmulq_f32(vrsqrtx4567, vx4567);
    float32x4_t vhalfrsqrtx4567 = vmulq_f32(vrsqrtx4567, vhalf);
    float32x4_t vsqrtx89AB = vmulq_f32(vrsqrtx89AB, vx89AB);
    float32x4_t vhalfrsqrtx89AB = vmulq_f32(vrsqrtx89AB, vhalf);
    float32x4_t vsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vxCDEF);
    float32x4_t vhalfrsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vhalf);
    float32x4_t vsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vxGHIJ);
    float32x4_t vhalfrsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vhalf);
    float32x4_t vsqrtxKLMN = vmulq_f32(vrsqrtxKLMN, vxKLMN);
    float32x4_t vhalfrsqrtxKLMN = vmulq_f32(vrsqrtxKLMN, vhalf);
    float32x4_t vsqrtxOPQR = vmulq_f32(vrsqrtxOPQR, vxOPQR);
    float32x4_t vhalfrsqrtxOPQR = vmulq_f32(vrsqrtxOPQR, vhalf);
    float32x4_t vsqrtxSTUV = vmulq_f32(vrsqrtxSTUV, vxSTUV);
    float32x4_t vhalfrsqrtxSTUV = vmulq_f32(vrsqrtxSTUV, vhalf);
    float32x4_t vsqrtxWXYZ = vmulq_f32(vrsqrtxWXYZ, vxWXYZ);
    float32x4_t vhalfrsqrtxWXYZ = vmulq_f32(vrsqrtxWXYZ, vhalf);
    float32x4_t vsqrtx = vmulq_f32(vrsqrtx, vx);
    float32x4_t vhalfrsqrtx = vmulq_f32(vrsqrtx, vhalf);

    // Newton-Raphson step 1: residual = 0.5 - sqrtx * halfrsqrtx
    // (zero when both approximations are exact).
    float32x4_t vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    float32x4_t vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    float32x4_t vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);
    float32x4_t vresidualCDEF = vfmsq_f32(vhalf, vsqrtxCDEF, vhalfrsqrtxCDEF);
    float32x4_t vresidualGHIJ = vfmsq_f32(vhalf, vsqrtxGHIJ, vhalfrsqrtxGHIJ);
    float32x4_t vresidualKLMN = vfmsq_f32(vhalf, vsqrtxKLMN, vhalfrsqrtxKLMN);
    float32x4_t vresidualOPQR = vfmsq_f32(vhalf, vsqrtxOPQR, vhalfrsqrtxOPQR);
    float32x4_t vresidualSTUV = vfmsq_f32(vhalf, vsqrtxSTUV, vhalfrsqrtxSTUV);
    float32x4_t vresidualWXYZ = vfmsq_f32(vhalf, vsqrtxWXYZ, vhalfrsqrtxWXYZ);
    float32x4_t vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);

    // Apply step 1 to both running approximations: v += residual * v.
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);
    vhalfrsqrtxCDEF = vfmaq_f32(vhalfrsqrtxCDEF, vresidualCDEF, vhalfrsqrtxCDEF);
    vsqrtxCDEF = vfmaq_f32(vsqrtxCDEF, vresidualCDEF, vsqrtxCDEF);
    vhalfrsqrtxGHIJ = vfmaq_f32(vhalfrsqrtxGHIJ, vresidualGHIJ, vhalfrsqrtxGHIJ);
    vsqrtxGHIJ = vfmaq_f32(vsqrtxGHIJ, vresidualGHIJ, vsqrtxGHIJ);
    vhalfrsqrtxKLMN = vfmaq_f32(vhalfrsqrtxKLMN, vresidualKLMN, vhalfrsqrtxKLMN);
    vsqrtxKLMN = vfmaq_f32(vsqrtxKLMN, vresidualKLMN, vsqrtxKLMN);
    vhalfrsqrtxOPQR = vfmaq_f32(vhalfrsqrtxOPQR, vresidualOPQR, vhalfrsqrtxOPQR);
    vsqrtxOPQR = vfmaq_f32(vsqrtxOPQR, vresidualOPQR, vsqrtxOPQR);
    vhalfrsqrtxSTUV = vfmaq_f32(vhalfrsqrtxSTUV, vresidualSTUV, vhalfrsqrtxSTUV);
    vsqrtxSTUV = vfmaq_f32(vsqrtxSTUV, vresidualSTUV, vsqrtxSTUV);
    vhalfrsqrtxWXYZ = vfmaq_f32(vhalfrsqrtxWXYZ, vresidualWXYZ, vhalfrsqrtxWXYZ);
    vsqrtxWXYZ = vfmaq_f32(vsqrtxWXYZ, vresidualWXYZ, vsqrtxWXYZ);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);

    // Newton-Raphson step 2: recompute the residual with the refined values.
    vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);
    vresidualCDEF = vfmsq_f32(vhalf, vsqrtxCDEF, vhalfrsqrtxCDEF);
    vresidualGHIJ = vfmsq_f32(vhalf, vsqrtxGHIJ, vhalfrsqrtxGHIJ);
    vresidualKLMN = vfmsq_f32(vhalf, vsqrtxKLMN, vhalfrsqrtxKLMN);
    vresidualOPQR = vfmsq_f32(vhalf, vsqrtxOPQR, vhalfrsqrtxOPQR);
    vresidualSTUV = vfmsq_f32(vhalf, vsqrtxSTUV, vhalfrsqrtxSTUV);
    vresidualWXYZ = vfmsq_f32(vhalf, vsqrtxWXYZ, vhalfrsqrtxWXYZ);
    vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);

    // Apply step 2: v += residual * v.
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);
    vhalfrsqrtxCDEF = vfmaq_f32(vhalfrsqrtxCDEF, vresidualCDEF, vhalfrsqrtxCDEF);
    vsqrtxCDEF = vfmaq_f32(vsqrtxCDEF, vresidualCDEF, vsqrtxCDEF);
    vhalfrsqrtxGHIJ = vfmaq_f32(vhalfrsqrtxGHIJ, vresidualGHIJ, vhalfrsqrtxGHIJ);
    vsqrtxGHIJ = vfmaq_f32(vsqrtxGHIJ, vresidualGHIJ, vsqrtxGHIJ);
    vhalfrsqrtxKLMN = vfmaq_f32(vhalfrsqrtxKLMN, vresidualKLMN, vhalfrsqrtxKLMN);
    vsqrtxKLMN = vfmaq_f32(vsqrtxKLMN, vresidualKLMN, vsqrtxKLMN);
    vhalfrsqrtxOPQR = vfmaq_f32(vhalfrsqrtxOPQR, vresidualOPQR, vhalfrsqrtxOPQR);
    vsqrtxOPQR = vfmaq_f32(vsqrtxOPQR, vresidualOPQR, vsqrtxOPQR);
    vhalfrsqrtxSTUV = vfmaq_f32(vhalfrsqrtxSTUV, vresidualSTUV, vhalfrsqrtxSTUV);
    vsqrtxSTUV = vfmaq_f32(vsqrtxSTUV, vresidualSTUV, vsqrtxSTUV);
    vhalfrsqrtxWXYZ = vfmaq_f32(vhalfrsqrtxWXYZ, vresidualWXYZ, vhalfrsqrtxWXYZ);
    vsqrtxWXYZ = vfmaq_f32(vsqrtxWXYZ, vresidualWXYZ, vsqrtxWXYZ);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);

    // Final adjustment step: adjustment = x - sqrtx^2 (the remaining error,
    // captured exactly thanks to the fused multiply-subtract).
    const float32x4_t vadjustment0123 = vfmsq_f32(vx0123, vsqrtx0123, vsqrtx0123);
    const float32x4_t vadjustment4567 = vfmsq_f32(vx4567, vsqrtx4567, vsqrtx4567);
    const float32x4_t vadjustment89AB = vfmsq_f32(vx89AB, vsqrtx89AB, vsqrtx89AB);
    const float32x4_t vadjustmentCDEF = vfmsq_f32(vxCDEF, vsqrtxCDEF, vsqrtxCDEF);
    const float32x4_t vadjustmentGHIJ = vfmsq_f32(vxGHIJ, vsqrtxGHIJ, vsqrtxGHIJ);
    const float32x4_t vadjustmentKLMN = vfmsq_f32(vxKLMN, vsqrtxKLMN, vsqrtxKLMN);
    const float32x4_t vadjustmentOPQR = vfmsq_f32(vxOPQR, vsqrtxOPQR, vsqrtxOPQR);
    const float32x4_t vadjustmentSTUV = vfmsq_f32(vxSTUV, vsqrtxSTUV, vsqrtxSTUV);
    const float32x4_t vadjustmentWXYZ = vfmsq_f32(vxWXYZ, vsqrtxWXYZ, vsqrtxWXYZ);
    const float32x4_t vadjustment = vfmsq_f32(vx, vsqrtx, vsqrtx);

    // y = sqrtx + (0.5/sqrt(x)) * adjustment, i.e. one last first-order
    // correction toward sqrt(x).
    const float32x4_t vy0123 = vfmaq_f32(vsqrtx0123, vhalfrsqrtx0123, vadjustment0123);
    const float32x4_t vy4567 = vfmaq_f32(vsqrtx4567, vhalfrsqrtx4567, vadjustment4567);
    const float32x4_t vy89AB = vfmaq_f32(vsqrtx89AB, vhalfrsqrtx89AB, vadjustment89AB);
    const float32x4_t vyCDEF = vfmaq_f32(vsqrtxCDEF, vhalfrsqrtxCDEF, vadjustmentCDEF);
    const float32x4_t vyGHIJ = vfmaq_f32(vsqrtxGHIJ, vhalfrsqrtxGHIJ, vadjustmentGHIJ);
    const float32x4_t vyKLMN = vfmaq_f32(vsqrtxKLMN, vhalfrsqrtxKLMN, vadjustmentKLMN);
    const float32x4_t vyOPQR = vfmaq_f32(vsqrtxOPQR, vhalfrsqrtxOPQR, vadjustmentOPQR);
    const float32x4_t vySTUV = vfmaq_f32(vsqrtxSTUV, vhalfrsqrtxSTUV, vadjustmentSTUV);
    const float32x4_t vyWXYZ = vfmaq_f32(vsqrtxWXYZ, vhalfrsqrtxWXYZ, vadjustmentWXYZ);
    const float32x4_t vy = vfmaq_f32(vsqrtx, vhalfrsqrtx, vadjustment);

    // Store 10 quads of results.
    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
    vst1q_f32(y, vyGHIJ); y += 4;
    vst1q_f32(y, vyKLMN); y += 4;
    vst1q_f32(y, vyOPQR); y += 4;
    vst1q_f32(y, vySTUV); y += 4;
    vst1q_f32(y, vyWXYZ); y += 4;
    vst1q_f32(y, vy); y += 4;
  }
  // Scalar tail: fewer than 40 floats remain; fall back to libm sqrtf().
  if XNN_UNLIKELY(n != 0) {
    do {
      const float vx = *x++;
      const float vy = sqrtf(vx);
      *y++ = vy;
      n -= sizeof(float);
    } while (n != 0);
  }
}
179