// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

#include <fp16/bitcasts.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

// ELU (Exponential Linear Unit) microkernel over n/sizeof(float) scalar f32
// elements:
//   y = beta * x                          for x >= 0
//   y = alpha * (exp(prescale * x) - 1)   for x <  0
// Variant naming (from the template): "rr2" = two-constant (hi/lo) Cody-Waite
// range reduction of the exp argument, "lut16" = 16-entry table of 2^(-k/16)
// values, "p3" = degree-3 polynomial correction, "x3" = main loop processes
// 3 elements per iteration.
//
// Arguments:
//   n      - number of BYTES of input (must be a whole number of floats)
//   x      - input array
//   y      - output array (may alias x; each element is read before its
//            output slot is written)
//   params - precomputed constants for this variant; prepared by the ELU
//            parameter-setup code elsewhere in XNNPack
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  // Unpack the microkernel constants once, outside the loops.
  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  // Low 4 bits of the fixed-point exponent select one of the 16 LUT entries.
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

  // Main loop: 3 elements per iteration, software-pipelined so the three
  // lanes' independent operations are interleaved.
  for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    x += 3;

    // z = prescale * x is the argument of exp() on the negative branch.
    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;

    // n ~= z * log2(e), rounded to multiples of 1/16 via the magic-bias
    // trick: adding the large vmagic_bias leaves the fixed-point result
    // (4 fractional bits) in the low mantissa bits of vn.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;

    // ven:  integer part of n shifted into the float exponent field
    //       (23 mantissa bits - 4 fractional bits = shift by 19).
    // vidx: the 4 fractional bits, selecting the 2^(-k/16) table entry.
    // Then subtract the bias to recover n as a regular float.
    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
    vn2 -= vmagic_bias;

    // t = z - n*ln2, in two steps (hi then lo) for extra precision.
    // s = 2^n, reconstructed by adding the exponent bits onto the LUT value.
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);

    // For strongly negative z (z <= sat_cutoff) exp(z) underflows; force
    // s and t to 0 so the result saturates to -alpha exactly, without
    // garbage from the underflowed intermediates.
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }

    // Degree-3 polynomial correction: p = (c3*t + c2) * t^2 ...
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;

    // ... then combine so that, below, p + s == s*(1 + t + c2*t^2 + c3*t^3) - 1
    // ~= exp(z) - 1.
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;

    // e = alpha * (exp(z) - 1) for the negative branch; the non-negative
    // branch is just beta * x.
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;

    // Select the ELU branch for negative inputs.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y += 3;
  }
  // Remainder loop: 1 element at a time; same algorithm as above.
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      // Magic-bias rounding of z*log2(e) to multiples of 1/16 (see above).
      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = fp32_to_bits(vn) << 19;
      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
      vn -= vmagic_bias;

      // Two-step range reduction and 2^n reconstruction from the LUT.
      float vt = vn * vminus_ln2_hi + vz;
      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      // Saturate strongly negative inputs (result becomes exactly -alpha).
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      // p + s ~= exp(z) - 1 after the combination below.
      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}
169