// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

#include <fp16/bitcasts.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

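// ELU: y := beta * x for x >= 0, and y := alpha * (exp(prescale * x) - 1) for
// x < 0. As the kernel name indicates, exp() is approximated with a scalar
// two-constant range reduction ("rr2"), a 16-entry table of 2^(-k/16) bit
// patterns ("lut16"), and a degree-3 polynomial ("p3"); the main loop handles
// 6 elements per iteration ("x6").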
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

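  // Load the ELU parameters and the constants of the exp() approximation.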
  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

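  // Main loop: process 6 elements per iteration.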
  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    float vx5 = x[5];
    x += 6;

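    // z := prescale * x is the argument of exp() in the negative branch.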
    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;
    const float vz5 = vx5 * vprescale;

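    // Range reduction via the "magic bias" trick: adding the large magic_bias
    // constant rounds z * log2e to a fixed-point value held in the low mantissa
    // bits of vn; its low 4 bits index the 16-entry exp2 table and the higher
    // bits carry the integer exponent.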
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;
    float vn5 = vz5 * vlog2e + vmagic_bias;

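    // Split vn: shifting left by 19 (= 23 - 4) moves the integer exponent bits
    // into the exponent field of a float (ven), the low 4 bits select the table
    // entry (vidx), and subtracting the magic bias restores n as a float.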
    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
    vn3 -= vmagic_bias;
    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
    vn4 -= vmagic_bias;
    const uint32_t ven5 = fp32_to_bits(vn5) << 19;
    const uint32_t vidx5 = fp32_to_bits(vn5) & vindex_mask;
    vn5 -= vmagic_bias;

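    // Reconstruct the power-of-two factor s from the 2^(-k/16) table entry plus
    // the exponent bits ven, and start the reduced argument t with the high
    // half of the two-constant (Cody-Waite style) ln(2) reduction.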
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
    float vt5 = vn5 * vminus_ln2_hi + vz5;
    float vs5 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx5] + ven5);

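    // Fold in the low half of the ln(2) split. For very negative z (at or below
    // sat_cutoff) exp(z) - 1 saturates to -1, so force s and t to zero; the
    // reconstruction below then yields s - 1 = -1 exactly.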
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    vt3 = vn3 * vminus_ln2_lo + vt3;
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    vt4 = vn4 * vminus_ln2_lo + vt4;
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }
    vt5 = vn5 * vminus_ln2_lo + vt5;
    if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
      vs5 = 0.0f;
      vt5 = 0.0f;
    }

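    // Degree-3 polynomial: p will accumulate t + c2*t^2 + c3*t^3 ~= exp(t) - 1,
    // built up in steps; here p := (c3*t + c2) * t.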
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;
    float vp4 = vc3 * vt4 + vc2;
    float vp5 = vc3 * vt5 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;
    vp5 *= vt5;

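    // Scale t by s and subtract 1 from s, so that the reconstruction below
    // computes s*exp(t) - 1 ~= exp(z) - 1.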
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;
    vt5 *= vs5;
    vs5 -= vone;

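    // p := p*t + t now equals s*(t + c2*t^2 + c3*t^3), the polynomial part of
    // s*exp(t) - 1 (t was pre-multiplied by s above).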
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;
    vp5 = vp5 * vt5 + vt5;

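    // Compute both branches: e := (p + s)*alpha ~= alpha*(exp(z) - 1) (recall
    // s already had 1 subtracted), and y := beta*x for the non-negative case.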
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;
    const float ve5 = (vp5 + vs5) * valpha;
    float vy5 = vx5 * vbeta;

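    // Select the ELU branch: negative inputs take the exponential path.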
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }
    if XNN_UNPREDICTABLE(vx5 < 0.0f) {
      vy5 = ve5;
    }

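    // Store the 6 results.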
    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y[5] = vy5;
    y += 6;
  }
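  // Remainder loop: handle the leftover (fewer than 6) elements one at a time
  // with the same computation as above.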
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = fp32_to_bits(vn) << 19;
      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}