// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

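// Evaluates sigmoid(x) = 1 / (1 + exp(-x)) elementwise. The computation is
// reduced to exp(-|x|) using a 64-entry table of 2**(-k/64) values ("lut64")
// plus a degree-2 polynomial correction ("p2"); the final division is done
// with a VRECPE reciprocal estimate refined by two Newton-Raphson steps
// built on VRECPS ("nr2recps"). The main loop handles 24 floats per
// iteration ("x24").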
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x24(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

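  // Main loop: process 24 elements (six NEON q-registers) per iteration.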
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    const float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
    const float32x4_t vxKLMN = vld1q_f32(x); x += 4;

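    // Reduce to z := |x|; the kernel computes sigmoid(-z) and reflects the
    // result for non-negative inputs at the end of the iteration.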
    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);
    const float32x4_t vz89AB = vabsq_f32(vx89AB);
    const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
    const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
    const float32x4_t vzKLMN = vabsq_f32(vxKLMN);

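    // n := round(z * minus_log2e), i.e. round(-z * 64 / log(2)), computed by
    // adding a large magic bias so the rounded integer lands in the low
    // mantissa bits of vn.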
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
    float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vminus_log2e);

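    // e := the integer part of n / 64, moved into the float exponent field:
    // shifting left by 17 takes bit 6 of the fixed-point n to bit 23.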
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
    const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);

    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));

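    // NEON has no vector gather, so the table lookups are done with scalar
    // lane loads: each 64-bit lane of a vidx vector holds two 32-bit indices.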
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
    const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
    const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
    float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
    float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
    vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
    vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
    const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);

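    // s := l * 2**(n div 64), reconstructed by adding the exponent bits e
    // directly to the bit pattern of the table value l.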
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
    const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));

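    // Subtract the magic bias to recover n as a regular float.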
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
    vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);

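    // t := z + n * log(2)/64 is the reduced argument; log(2)/64 is applied
    // as the extended-precision pair ln2_hi + ln2_lo ("rr2" = two-step
    // range reduction).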
    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
    float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
    vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vln2_lo);

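    // Degree-2 polynomial: p := t - c2 * t**2, so that 1 - p approximates
    // exp(-t) on the reduced range.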
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);

    vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
    vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
    vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
    vpCDEF = vmlsq_f32(vtCDEF, vpCDEF, vtCDEF);
    vpGHIJ = vmlsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
    vpKLMN = vmlsq_f32(vtKLMN, vpKLMN, vtKLMN);

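    // y := s - s * p = s * (1 - p) ~= s * exp(-t) = exp(-z).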
    const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
    const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
    const float32x4_t vyCDEF = vmlsq_f32(vsCDEF, vsCDEF, vpCDEF);
    const float32x4_t vyGHIJ = vmlsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
    const float32x4_t vyKLMN = vmlsq_f32(vsKLMN, vsKLMN, vpKLMN);

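    // d := y + 1 = 1 + exp(-z), the sigmoid denominator.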
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
    const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
    const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
    const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
    const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);

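    // r ~= 1/d: start from the VRECPE estimate and refine with two
    // Newton-Raphson steps, r := r * (2 - r * d), where VRECPS supplies the
    // (2 - r * d) term.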
    float32x4_t vr0123 = vrecpeq_f32(vd0123);
    float32x4_t vr4567 = vrecpeq_f32(vd4567);
    float32x4_t vr89AB = vrecpeq_f32(vd89AB);
    float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
    float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
    float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
    vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
    vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
    vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
    vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
    vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
    vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
    vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
    vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));

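    // f := exp(-z) / (1 + exp(-z)) = sigmoid(-z) = sigmoid(-|x|).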
    float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
    float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
    float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
    float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
    float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
    float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);

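    // For |x| above the denormal cutoff, exp(-|x|) would underflow; force
    // sigmoid(-|x|) to exactly 0 there.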
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
    vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));

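    // Reflect: keep f where x < 0; otherwise sigmoid(x) = 1 - sigmoid(-x).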
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
    const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
    vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
    vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
    vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
    vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
    vst1q_f32(y, vf89AB); y += 4;
    vst1q_f32(y, vfCDEF); y += 4;
    vst1q_f32(y, vfGHIJ); y += 4;
    vst1q_f32(y, vfKLMN); y += 4;
  }
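  // Tail loop: handle remaining full groups of four elements with the same
  // computation, one vector at a time.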
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
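  // Final 1-3 elements: load a full vector (the XNN_OOB_READS annotation on
  // the function marks that the kernel may read past the end of x here),
  // compute all four lanes, and store only the valid ones.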
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}
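
// ---------------------------------------------------------------------------
// Editor's note: the function below is not part of the generated kernel; it
// is a minimal scalar sketch of the same evaluation scheme, added for
// exposition. It substitutes exp2f() for the 64-entry table plus exponent
// reconstruction, uses plain mathematical constants (and c2 = 0.5) instead
// of XNNPACK's tuned split-precision parameters, and divides directly
// instead of iterating VRECPE/VRECPS, so it illustrates the algorithm
// rather than reproducing the kernel bit-for-bit. The name
// sigmoid_ref_sketch is ours, not an XNNPACK symbol.

#include <math.h>

static float sigmoid_ref_sketch(float x) {
  const float z = fabsf(x);                       // evaluate at -z, z = |x|
  const float n = rintf(z * -92.3324826168937f);  // n := round(-z * 64 / log(2))
  const float t = z + n * 0.0108304246962491f;    // t := z + n * log(2)/64
  const float s = exp2f(n * 0.015625f);           // s := 2**(n/64); the kernel
                                                  // builds this from the table
                                                  // value l and exponent bits e
  const float p = t - 0.5f * t * t;               // p := t - c2 * t**2
  const float e = s - s * p;                      // e := s * (1 - p) ~= exp(-z)
  const float f = e / (1.0f + e);                 // sigmoid(-z)
  return x < 0.0f ? f : 1.0f - f;                 // reflect for x >= 0
}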