// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];

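// Computes f[i] := exp(input[i] - *max) over a batch of single-precision
// floats, stores the results to output, and accumulates their sum into *sum.
// This is the vectorized core of a numerically stable softmax.
//
// Usage sketch (illustrative only, not part of the generated kernel):
// assuming params was initialized by the matching XNNPACK init routine for
// the neon_rr2_lut64_p2 variant, a softmax over n floats x[] -> y[] could
// call:
//
//   float vmax = x[0];
//   for (size_t i = 1; i < n; i++) vmax = fmaxf(vmax, x[i]);
//   float vsum;
//   xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc5(
//       n * sizeof(float), x, &vmax, y, &vsum, &params);
//   // y[i] == exp(x[i] - vmax); divide each y[i] by vsum to normalize.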
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc5(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements % sizeof(float) == 0);

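  // Broadcast the maximum input and load the pre-computed algorithm
  // constants from params.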
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

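  // The acc5 variant keeps 5 independent accumulators to break the
  // dependency chain of the additions across the 20-element main loop.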
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  float32x4_t vacc3 = vmovq_n_f32(0.0f);
  float32x4_t vacc4 = vmovq_n_f32(0.0f);
  for (; elements >= 20 * sizeof(float); elements -= 20 * sizeof(float)) {
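    // Load 20 (5x4) inputs at a time.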
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

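    // Subtract maximum input x := i - i_max. This implies x <= 0.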
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

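    // Compute reduced argument n := round(x * log2(e) * 64) / 64 by adding a
    // large magic bias whose ulp is 1/64: the FMA result keeps n, with 6
    // fractional bits, in the low bits of the float. The trick is only valid
    // for |x * log2(e)| below about 2**16, which is acceptable because inputs
    // outside [-87.336540, 0.0] underflow expf(x) anyway and are fixed up at
    // the very end of the algorithm.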
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

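    // Create a scale s := 2**n == 2**e * l, where e := floor(n) and
    // l := 2**(n - e) is fetched from the 64-entry table. Here: extract e
    // from bits 6:14 of the biased n and shift it into bits 23:31, the
    // position of the floating-point exponent.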
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);

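    // Use the low 6 bits of each biased n as the table index for l. Indices
    // are extracted pairwise through 64-bit lanes; each pair of table values
    // is gathered into a 64-bit vector and the halves are recombined.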
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);

    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);

    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

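    // Adjust the exponent of the fetched l values by e (integer addition on
    // the bit pattern) to get the final s values.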
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));

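    // Subtract the magic bias to recover the final n value as a float.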
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

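    // Compute reduced argument t := x - n * log(2) with two-step Cody-Waite
    // range reduction (the rr2 in the kernel name): log(2) is split into hi
    // and lo parts to improve accuracy.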
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

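    // Compute degree-2 polynomial approximation for exp(t) on
    // [-log(2)/128, log(2)/128]:
    //   exp(t) ~ 1 + t + c2 * t**2 = 1 + p, where p := t + c2 * t**2.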
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);

    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vmlaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);

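    // Reconstruct the final value f := s * (1 + p) = s + s * p.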
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);

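    // For inputs below the denormal cutoff, replace the output with +0.0f.
    // For NaN inputs the comparison is false, so those outputs pass through.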
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

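    // Store 20 (5x4) outputs at a time.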
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

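    // Accumulate the computed exponents, one accumulator per output vector.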
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc2 = vaddq_f32(vacc2, vf89AB);
    vacc3 = vaddq_f32(vacc3, vfCDEF);
    vacc4 = vaddq_f32(vacc4, vfGHIJ);
  }
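  // Add up all accumulators to vacc0.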
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc2 = vaddq_f32(vacc2, vacc3);
  vacc0 = vaddq_f32(vacc0, vacc2);
  vacc0 = vaddq_f32(vacc0, vacc4);

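  // Process remaining full vectors of 4 inputs, repeating the same algorithm
  // on a single vector and accumulating into vacc.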
  float32x4_t vacc = vacc0;
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
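  // Reduce the vector accumulator: to a scalar with vaddvq_f32 on AArch64,
  // or to a 2-lane partial sum on AArch32 (folded to a scalar at the end).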
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
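  // Handle a final partial vector of 1-3 elements. A full vector of 4 is
  // loaded (hence the XNN_OOB_READS annotation on the kernel), but only
  // `elements` bytes of results are stored and accumulated.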
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

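    // Store up to 3 outputs lane by lane and fold them into the partial sum.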
    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}