// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/scalar-rr2-lut64-p2-div.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


// Note: redeclared as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];

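// Computes y[i] := sigmoid(x[i]) = 1 / (1 + exp(-x[i])) over n / sizeof(float) elements,
// four elements per main-loop iteration plus a scalar remainder loop.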
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const float vminus_log2e = params->scalar_rr2_lut64_p2.minus_log2e;
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vln2_hi = params->scalar_rr2_lut64_p2.ln2_hi;
  const float vln2_lo = params->scalar_rr2_lut64_p2.ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vone = params->scalar_rr2_lut64_p2.one;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;

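  // Outline of the computation, with z := |x|:
  //   1. n := z * minus_log2e + magic_bias rounds z * (-log2 e) to a multiple of 2**-6
  //      (magic-bias trick; the bias is subtracted back out below).
  //   2. The scale s ~= 2**n is assembled from a 64-entry table entry (selected by the
  //      low 6 bits of n) plus the remaining bits of n shifted into the exponent field.
  //   3. t := z + n * ln2 is the small residual, with ln2 split into hi/lo parts ("rr2").
  //   4. exp(-z) ~= s * (1 - t + c2 * t**2), and sigmoid(-z) = exp(-z) / (exp(-z) + 1).
  //   5. Results are flushed to 0 for very large z and reflected for positive x.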
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float vx0 = x[0];
    const float vx1 = x[1];
    const float vx2 = x[2];
    const float vx3 = x[3];
    x += 4;

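    // z := |x|: the core evaluates sigmoid(-z) and reflects the result for positive inputs.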
    const float vz0 = fabsf(vx0);
    const float vz1 = fabsf(vx1);
    const float vz2 = fabsf(vx2);
    const float vz3 = fabsf(vx3);

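    // n := z * (-log2 e) rounded to a multiple of 2**-6 via the magic-bias trick.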
    float vn0 = vz0 * vminus_log2e + vmagic_bias;
    float vn1 = vz1 * vminus_log2e + vmagic_bias;
    float vn2 = vz2 * vminus_log2e + vmagic_bias;
    float vn3 = vz3 * vminus_log2e + vmagic_bias;

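    // Shift the integer bits of n up into the floating-point exponent position.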
    const uint32_t ve0 = float_as_uint32(vn0) << 17;
    const uint32_t ve1 = float_as_uint32(vn1) << 17;
    const uint32_t ve2 = float_as_uint32(vn2) << 17;
    const uint32_t ve3 = float_as_uint32(vn3) << 17;

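    // Combine the table entry selected by the low 6 bits of n with ve to form the scale s ~= 2**n.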
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    const float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx0] + ve0);
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx1] + ve1);
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    const float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx2] + ve2);
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    const float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx3] + ve3);

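    // Subtract the magic bias to recover n as a regular float.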
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;
    vn2 -= vmagic_bias;
    vn3 -= vmagic_bias;

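    // t := z + n * ln2, with ln2 split into high and low parts for accuracy; since
    // n ~= -z * log2(e), the residual t is small.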
    float vt0 = vn0 * vln2_hi + vz0;
    float vt1 = vn1 * vln2_hi + vz1;
    float vt2 = vn2 * vln2_hi + vz2;
    float vt3 = vn3 * vln2_hi + vz3;

    vt0 = vn0 * vln2_lo + vt0;
    vt1 = vn1 * vln2_lo + vt1;
    vt2 = vn2 * vln2_lo + vt2;
    vt3 = vn3 * vln2_lo + vt3;

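    // Degree-2 polynomial: p := t - c2 * t**2, so that exp(-t) ~= 1 - p.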
    float vp0 = vt0 * vc2;
    float vp1 = vt1 * vc2;
    float vp2 = vt2 * vc2;
    float vp3 = vt3 * vc2;

    vp0 = vt0 - vp0 * vt0;
    vp1 = vt1 - vp1 * vt1;
    vp2 = vt2 - vp2 * vt2;
    vp3 = vt3 - vp3 * vt3;

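    // y := s * (1 - p) ~= exp(-z).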
    const float vy0 = vs0 - vs0 * vp0;
    const float vy1 = vs1 - vs1 * vp1;
    const float vy2 = vs2 - vs2 * vp2;
    const float vy3 = vs3 - vs3 * vp3;

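    // d := y + 1 ~= exp(-z) + 1.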
    const float vd0 = vy0 + vone;
    const float vd1 = vy1 + vone;
    const float vd2 = vy2 + vone;
    const float vd3 = vy3 + vone;

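    // f := y / d ~= exp(-z) / (exp(-z) + 1) = sigmoid(-z), i.e. sigmoid(x) for x <= 0.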
    float vf0 = vy0 / vd0;
    float vf1 = vy1 / vd1;
    float vf2 = vy2 / vd2;
    float vf3 = vy3 / vd3;

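    // For z above the cutoff, sigmoid(-z) underflows; flush the result to 0.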
    if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
      vf1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz2 > vdenorm_cutoff) {
      vf2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz3 > vdenorm_cutoff) {
      vf3 = 0.0f;
    }

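    // For positive inputs, apply the reflection sigmoid(x) = 1 - sigmoid(-x).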
    if XNN_UNPREDICTABLE(vx0 > 0.0f) {
      vf0 = vone - vf0;
    }
    if XNN_UNPREDICTABLE(vx1 > 0.0f) {
      vf1 = vone - vf1;
    }
    if XNN_UNPREDICTABLE(vx2 > 0.0f) {
      vf2 = vone - vf2;
    }
    if XNN_UNPREDICTABLE(vx3 > 0.0f) {
      vf3 = vone - vf3;
    }

    y[0] = vf0;
    y[1] = vf1;
    y[2] = vf2;
    y[3] = vf3;
    y += 4;
  }
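  // Process the remaining 1-3 elements one at a time using the same algorithm.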
  if XNN_UNLIKELY(n != 0) {
    do {
      const float vx = *x++;

      const float vz = fabsf(vx);

      float vn = vz * vminus_log2e + vmagic_bias;
      const uint32_t ve = float_as_uint32(vn) << 17;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
      vn -= vmagic_bias;

      float vt = vn * vln2_hi + vz;
      vt = vn * vln2_lo + vt;

      float vp = vt * vc2;
      vp = vt - vp * vt;

      const float vy = vs - vs * vp;
      const float vd = vy + vone;

      float vf = vy / vd;
      if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
        vf = 0.0f;
      }
      if XNN_UNPREDICTABLE(vx > 0.0f) {
        vf = vone - vf;
      }

      *y++ = vf;

      n -= sizeof(float);
    } while (n != 0);
  }
}