// Auto-generated file. Do not edit!
//   Template: src/f32-sigmoid/neon-lut2048-p1.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];

void xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x20(
    size_t n,
    const float* x,
    float* y,
    const void* params) XNN_DISABLE_TSAN
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p12f);
  const float32x4_t vminus_log2e = vmovq_n_f32(-0x1.715476p0f);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vmovq_n_f32(0x1.62E43p-1f);
  const float32x4_t vc1 = vmovq_n_f32(-0x1.FFFFFEp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep+6f);

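  // General structure of the algorithm:
  //
  //           / exp(x) / (1 + exp(x)) if x <= 0
  //   f(x) :=
  //           \ 1 - f(-x) if x >= 0
  //
  // The kernel evaluates f(-z) := exp(-z) / (1 + exp(-z)) with z := |x|, then flips the result to 1 - f(-z)
  // for non-negative inputs. Per the kernel name: exp(-z) uses a 2048-entry lookup table (lut2048), a
  // single-constant range reduction (rr1) and a degree-1 polynomial (p1); the division uses a reciprocal
  // estimate refined by two Newton-Raphson steps with FMA (nr2fma); 20 elements per main-loop iteration (x20).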
  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    const float32x4_t vxGHIJ = vld1q_f32(x); x += 4;

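    // Work with z := |x|, so that exp(-z) lies in (0, 1] and cannot overflow.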
    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);
    const float32x4_t vz89AB = vabsq_f32(vx89AB);
    const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
    const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);

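    // Compute the reduced argument n := -z / log(2), rounded to the nearest multiple of 1/2048. The rounding is
    // done by adding a large magic bias, which leaves the result with exactly 11 fractional mantissa bits; the
    // bias is subtracted back further below. The addition is fused with the multiplication by -log2(e) into a
    // single FMA. The trick is only valid for moderate z, but inputs with larger |x| saturate the sigmoid and
    // are fixed up at the end of the loop.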
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);

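    // Shift the bit pattern of the biased n left by 12, moving the integer part of n up into the floating-point
    // exponent field; these bits are added to the table value below to scale it by 2**int(n).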
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
    const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
    const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
    const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);

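    // The 11 low bits of the biased n, taken as an integer, select the table entry l ~= 2**frac(n); the stored
    // values lie in the [1/2, 1] range.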
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));

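    // Gather the four table entries of each vector from two 64-bit index pairs: a duplicate load fetches the
    // entry for the low 32-bit index into both lanes, then a lane load replaces lane 1 with the entry for the
    // high 32-bit index.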
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

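    // Reconstruct the scale s ~= 2**n: adding the shifted bits of n to the bit pattern of the table value
    // adjusts its floating-point exponent by the integer part of n.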
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));

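    // Subtract the magic bias to recover the reduced argument n as a regular float.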
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

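    // Compute the remainder t := z + n * log(2), with |t| bounded by log(2) / 4096.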
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
    float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);

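    // Degree-1 polynomial approximation of exp(-t) - 1 on the reduced range: p := c1 * t, with c1 ~= -1.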
    const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
    const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
    const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
    const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
    const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);

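    // Reconstruct the exponential: y := s + s * p ~= exp(-z).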
    const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);

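    // Denominator of the sigmoid: d := exp(-z) + 1.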
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
    const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
    const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
    const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);

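    // Coarse initial estimate of the reciprocal r := 1 / d from the VRECPE instruction (about 8 bits of
    // precision).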
    float32x4_t vr0123 = vrecpeq_f32(vd0123);
    float32x4_t vr4567 = vrecpeq_f32(vd4567);
    float32x4_t vr89AB = vrecpeq_f32(vd89AB);
    float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
    float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);

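    // Refine the estimate with two Newton-Raphson iterations, r := r + r * (1 - r * d); each iteration roughly
    // doubles the number of correct bits.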
    vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
    vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
    vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
    vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
    vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));

    vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
    vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
    vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
    vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
    vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));

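    // Reconstruct the sigmoid of the negated argument: f := y / d = exp(-z) / (1 + exp(-z)).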
    float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
    float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
    float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
    float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
    float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);

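    // For |x| above the cutoff (~87.34), exp(-z) would be denormal; flush f to zero there, so the final result
    // is exactly 0 (or exactly 1 after the sign fixup below).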
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));

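    // For non-negative x, apply the identity sigmoid(x) = 1 - sigmoid(-x): keep f where x < 0, use 1 - f
    // otherwise.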
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
    vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
    vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
    vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
    vst1q_f32(y, vf89AB); y += 4;
    vst1q_f32(y, vfCDEF); y += 4;
    vst1q_f32(y, vfGHIJ); y += 4;
  }
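  // Process the remaining full vectors of 4 elements with the same algorithm.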
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    const float32x4_t vp = vmulq_f32(vt, vc1);

    const float32x4_t vy = vfmaq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
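  // Handle the last 1-3 elements. A full vector is loaded and processed (assuming the usual XNNPACK padding
  // convention that makes the over-read past the end of the input safe); only the valid lanes are stored.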
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    const float32x4_t vp = vmulq_f32(vt, vc1);

    const float32x4_t vy = vfmaq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

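    // Partial store: write two lanes if at least two elements remain (then advance to the high half), and one
    // more lane if the remaining element count is odd.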
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}