// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

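// ELU microkernel: y := beta * x for x > 0 and y := alpha * (exp(prescale * x) - 1)
// otherwise, processing 6 floats per loop iteration. exp() is approximated with a
// 16-entry lookup table ("lut16"), a two-term Cody-Waite range reduction ("rr2"),
// and a degree-3 polynomial ("p3").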
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

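  // Load the ELU parameters (prescale/alpha/beta) and the exp2 approximation constants.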
  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

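  // Main loop: process 6 elements per iteration.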
  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    float vx5 = x[5];
    x += 6;

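    // z := clamp(prescale * x, sat_cutoff, 0): large negative inputs saturate at
    // sat_cutoff, and positive inputs clamp to 0 so that exp(z) - 1 == 0 for them.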
    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
    const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);

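    // n := z * log2(e), rounded to the nearest multiple of 1/16 via the magic-bias
    // trick: after the add, the low mantissa bits of vn hold the rounded value as a
    // fixed-point number with 4 fractional bits.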
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;
    float vn5 = vz5 * vlog2e + vmagic_bias;

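    // Split n: the integer part shifts into the float exponent field (23 - 4 == 19),
    // the low 4 bits index the 2^(-k/16) table; then subtract the bias to restore n.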
    const uint32_t ven0 = float_as_uint32(vn0) << 19;
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = float_as_uint32(vn1) << 19;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = float_as_uint32(vn2) << 19;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = float_as_uint32(vn3) << 19;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    vn3 -= vmagic_bias;
    const uint32_t ven4 = float_as_uint32(vn4) << 19;
    const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
    vn4 -= vmagic_bias;
    const uint32_t ven5 = float_as_uint32(vn5) << 19;
    const uint32_t vidx5 = float_as_uint32(vn5) & vindex_mask;
    vn5 -= vmagic_bias;

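    // s := 2^n, reconstructed by adding the exponent bits to the table entry, and
    // t := z - n * ln(2), using the high part of ln(2) first.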
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
    float vt5 = vn5 * vminus_ln2_hi + vz5;
    float vs5 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx5] + ven5);

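    // Finish the reduction with the low part of ln(2) (the "rr2" two-term reduction).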
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    vt4 = vn4 * vminus_ln2_lo + vt4;
    vt5 = vn5 * vminus_ln2_lo + vt5;

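    // Degree-3 polynomial: p := c2 * t + c3 * t^2, so that t * (1 + p) ~ exp(t) - 1.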
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;
    float vp4 = vc3 * vt4 + vc2;
    float vp5 = vc3 * vt5 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;
    vp5 *= vt5;

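    // Prepare exp(z) - 1 == s * (exp(t) - 1) + (s - 1): scale t by s, subtract 1 from s.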
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;
    vt5 *= vs5;
    vs5 -= vone;

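    // p := s * (t + c2 * t^2 + c3 * t^3) ~ s * (exp(t) - 1).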
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;
    vp5 = vp5 * vt5 + vt5;

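    // e := alpha * (exp(z) - 1) == alpha * (p + (s - 1)). Select between the ELU
    // branches without an explicit comparison: max(beta * x, 0) contributes the
    // positive branch, min(e, 0) the non-positive one.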
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
    const float ve5 = (vp5 + vs5) * valpha;
    float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);

    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
    vy5 += __builtin_wasm_min_f32(ve5, 0.0f);

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y[5] = vy5;
    y += 6;
  }
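  // Remainder loop: process the last 1-5 elements one at a time, using the same
  // algorithm as above.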
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = float_as_uint32(vn) << 19;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
      vy += __builtin_wasm_min_f32(ve, 0.0f);

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}