// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


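// 16-entry lookup table holding the IEEE-754 bit patterns of 2**(-k/16) for
// k = 0..15, used to reconstruct the fractional part of exp().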
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

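// ELU micro-kernel: y := beta * x for x >= 0, and alpha * (exp(prescale * x) - 1)
// for x < 0. exp() is evaluated with the 16-entry lookup table above plus a
// degree-3 polynomial on the reduced argument (rr1 = single-step range reduction).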
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const float32x4_t vprescale = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.minus_ln2);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);

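  // Main loop: 16 elements (four NEON vectors) per iteration.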
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;
    float32x4_t vx89AB = vld1q_f32(x); x += 4;
    float32x4_t vxCDEF = vld1q_f32(x); x += 4;

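    // z := prescale * x, clamped below at sat_cutoff, past which exp(z) - 1 has
    // already saturated to -1 in single precision.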
    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);

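    // Magic-bias trick: vn := z * log2e + magic_bias leaves round(z * log2e) in
    // the low mantissa bits of vn (log2e is pre-scaled by the 16-entry table size,
    // as the 4-bit index and 19-bit shift below imply).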
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);

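    // The low 4 bits of n select a table entry (shifted left by 2 to form a byte
    // offset); the bits above them, shifted into the exponent field (23 - 4 = 19),
    // become the power-of-two scale.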
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);

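    // Gather the four table entries per vector with scalar lane loads
    // (NEON has no gather instruction).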
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);

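    // Recover n as a float by subtracting the magic bias, and reconstruct
    // s := 2**(n/16) by adding the exponent bits to the table significand.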
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));

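    // Reduced argument t := z - n * ln(2) / 16 (minus_ln2 carries the 1/16 scaling).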
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);

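    // Degree-3 polynomial for exp(t) - 1: p := (c2 + c3 * t) * t, i.e. the
    // quadratic and cubic terms; the linear term is folded in below.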
    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
    float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);

    vp0123 = vmulq_f32(vp0123, vt0123);
    vp4567 = vmulq_f32(vp4567, vt4567);
    vp89AB = vmulq_f32(vp89AB, vt89AB);
    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);

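    // Fold in the scale: with vt := t * s and vs := s - 1, the FMA below computes
    // vp := s * (t + c2 * t^2 + c3 * t^3), so vp + vs == exp(z) - 1 up to the
    // polynomial approximation error.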
    vt0123 = vmulq_f32(vt0123, vs0123);
    vs0123 = vsubq_f32(vs0123, vone);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vs4567 = vsubq_f32(vs4567, vone);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vs89AB = vsubq_f32(vs89AB, vone);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vsCDEF = vsubq_f32(vsCDEF, vone);

    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);

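    // Negative-input branch: e := alpha * (exp(z) - 1).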
    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);

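    // Per-lane select: negative inputs (mask x < 0) take the exponential branch,
    // non-negative inputs take beta * x.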
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    vx0123 = vmulq_f32(vx0123, vbeta);
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    vx4567 = vmulq_f32(vx4567, vbeta);
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    vx89AB = vmulq_f32(vx89AB, vbeta);
    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
    vxCDEF = vmulq_f32(vxCDEF, vbeta);

    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
  }
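  // Remainder loop: one NEON vector (4 elements) at a time, same computation.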
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
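  // Tail of 1-3 elements: a full vector is loaded (the kernel is declared
  // XNN_OOB_READS, so reading past the buffer is permitted), computed as usual,
  // and only the valid lanes are stored.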
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

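    // Store two lanes, then one lane, according to the low bits of n.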
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}