// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


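// Computes output[i] := exp(input[i] - *max) over `elements` bytes of input
// and accumulates the sum of all results into *sum (the "radd + store" in
// the kernel name). Subtracting the running maximum first is the standard
// softmax trick that keeps exp() from overflowing.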
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements % sizeof(float) == 0);

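  // Broadcast the maximum and the algorithm constants into all four lanes:
  // log2e and minus_ln2 for range reduction, magic_bias for fast rounding
  // (typically 0x1.8p23f), c1..c5 for the degree-5 polynomial ("p5"), and
  // denorm_cutoff for flushing underflowing results to zero.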
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

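  // Running vector accumulator for the sum. The main loop consumes 20 floats
  // (five q-registers) per iteration, as the "x20" suffix indicates, keeping
  // several independent dependency chains in flight.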
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; elements >= 20 * sizeof(float); elements -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

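    // Subtract the maximum so every x is non-positive (assuming *max is the
    // true maximum of the inputs) and exp(x) cannot overflow.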
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

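    // Range reduction step 1: n := round(x * log2e). Adding the magic bias
    // leaves the rounded integer in the low mantissa bits of vn.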
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);

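    // Construct the scale factor s := 2^n by shifting those low bits
    // straight into the float's exponent field.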
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));

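    // Subtract the magic bias again to recover n as an ordinary float.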
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

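    // Range reduction step 2: t := x - n*ln2, done in a single FMA (the
    // "rr1" variant), leaving t roughly within [-ln2/2, ln2/2].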
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);

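    // Evaluate the degree-5 polynomial p(t) ~= (exp(t) - 1) / t by Horner's
    // rule: p = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).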
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
    float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);

    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
    vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);

    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
    vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);

    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
    vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);

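    // Recombine: exp(x) ~= s * (1 + t*p). Folding s into t first lets the
    // final step be a single FMA, f := s + (t*s)*p.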
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);

    float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
    float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);

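    // For x below denorm_cutoff the result underflows (and the plain bit
    // shift used to build s is no longer valid for such large-magnitude n),
    // so clear those lanes to +0 with a masked bit-clear.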
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }

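  // Tail loop: the same algorithm, one vector (4 floats) at a time, for
  // whatever the 20-wide main loop left behind.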
  float32x4_t vacc = vacc0;
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
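  // Horizontal reduction: AArch64 has vaddvq_f32 for a full 4-lane sum; on
  // 32-bit ARM, fold the quad into a pair here and finish with a pairwise
  // add after the remainder is handled.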
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
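  // Final 1-3 elements. XNN_OOB_READS marks that the full-vector load below
  // may read past the end of the buffer; only the valid lanes are stored
  // and accumulated.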
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
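  // Publish the scalar sum: on AArch32 vacc_lo still holds two lanes, so a
  // pairwise add produces the final value in lane 0.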
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}