// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


// Note: redeclared as uint32_t[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];

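// sigmoid(x) is evaluated indirectly: with z := |x|, the kernel computes f := exp(-z) / (1 + exp(-z)) == sigmoid(-z)
// and, for positive x, recovers sigmoid(x) = 1 - f. exp(-z) itself is split as s * exp(-t): s := 2**n is reconstructed
// from the 64-entry table of 2**(-k/64) values with an exponent adjustment, where n is -z / log(2) rounded to a
// multiple of 1/64, and exp(-t) of the remaining reduced argument t := z + n * log(2) is approximated by a degree-2
// polynomial.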
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const float vminus_log2e = params->scalar_rr2_lut64_p2.minus_log2e;
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vln2_hi = params->scalar_rr2_lut64_p2.ln2_hi;
  const float vln2_lo = params->scalar_rr2_lut64_p2.ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vone = params->scalar_rr2_lut64_p2.one;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;

  $if BATCH_TILE > 1:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      $for N in range(BATCH_TILE):
        const float vx${N} = x[${N}];
      x += ${BATCH_TILE};

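      // z := |x|: the algorithm evaluates exp(-z) for non-negative z and fixes up the sign at the very end.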
      $for N in range(BATCH_TILE):
        const float vz${N} = fabsf(vx${N});

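      // Compute the reduced argument n := round(-z / log(2), 6), i.e. -z / log(2) rounded to the nearest multiple
      // of 1/64. The rounding is done by adding a large magic bias: after the addition, the low mantissa bits of
      // the result hold the rounded value in fixed-point form.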
      $for N in range(BATCH_TILE):
        float vn${N} = vz${N} * vminus_log2e + vmagic_bias;

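      // Shift the bits of n left by 17 so that its integer part (bit 6 and above of the fixed-point representation
      // produced by the magic bias) lines up with the exponent field of a binary32 number.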
      $for N in range(BATCH_TILE):
        const uint32_t ve${N} = float_as_uint32(vn${N}) << 17;

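      // Reconstruct s ~= 2**n: the 6 lowest bits of n select the table entry for the fractional part of n, and
      // adding ve folds the integer part of n into the floating-point exponent of the fetched value.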
      $for N in range(BATCH_TILE):
        const uint32_t vidx${N} = float_as_uint32(vn${N}) & vindex_mask;
        const float vs${N} = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx${N}] + ve${N});

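      // Subtract the magic bias to recover n as a regular floating-point value.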
      $for N in range(BATCH_TILE):
        vn${N} -= vmagic_bias;

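      // Compute the remaining reduced argument t := z + n * log(2). log(2) is split into high and low parts
      // (Cody-Waite style) to reduce the rounding error of the range reduction.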
      $for N in range(BATCH_TILE):
        float vt${N} = vn${N} * vln2_hi + vz${N};

      $for N in range(BATCH_TILE):
        vt${N} = vn${N} * vln2_lo + vt${N};

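      // Compute a degree-2 polynomial approximation of exp(-t) on [-log(2)/128, log(2)/128]:
      //   exp(-t) ~= 1 - t + c2 * t**2 = 1 - p, with p := t - c2 * t**2.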
      $for N in range(BATCH_TILE):
        float vp${N} = vt${N} * vc2;

      $for N in range(BATCH_TILE):
        vp${N} = vt${N} - vp${N} * vt${N};

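      // Reconstruct exp(-z) ~= s * (1 - p).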
      $for N in range(BATCH_TILE):
        const float vy${N} = vs${N} - vs${N} * vp${N};

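      // Reconstruct sigmoid(-z) = exp(-z) / (1 + exp(-z)).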
      $for N in range(BATCH_TILE):
        const float vd${N} = vy${N} + vone;

      $for N in range(BATCH_TILE):
        float vf${N} = vy${N} / vd${N};

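      // For z above the denormal cutoff, exp(-z) underflows, so the result is flushed to zero.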
      $for N in range(BATCH_TILE):
        if XNN_UNPREDICTABLE(vz${N} > vdenorm_cutoff) {
          vf${N} = 0.0f;
        }

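      // For positive inputs, recover sigmoid(x) through the identity sigmoid(x) = 1 - sigmoid(-x).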
      $for N in range(BATCH_TILE):
        if XNN_UNPREDICTABLE(vx${N} > 0.0f) {
          vf${N} = vone - vf${N};
        }

      $for N in range(BATCH_TILE):
        y[${N}] = vf${N};
      y += ${BATCH_TILE};
    }
  $if BATCH_TILE == 1:
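    // With BATCH_TILE == 1 there is no unrolled loop above: every element is processed by this scalar loop.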
    do {
      const float vx = *x++;

      const float vz = fabsf(vx);

      float vn = vz * vminus_log2e + vmagic_bias;
      const uint32_t ve = float_as_uint32(vn) << 17;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
      vn -= vmagic_bias;

      float vt = vn * vln2_hi + vz;
      vt = vn * vln2_lo + vt;

      float vp = vt * vc2;
      vp = vt - vp * vt;

      const float vy = vs - vs * vp;
      const float vd = vy + vone;

      float vf = vy / vd;
      if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
        vf = 0.0f;
      }
      if XNN_UNPREDICTABLE(vx > 0.0f) {
        vf = vone - vf;
      }

      *y++ = vf;

      n -= sizeof(float);
    } while (n != 0);
  $elif BATCH_TILE == 2:
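    // With BATCH_TILE == 2, at most one element can remain after the unrolled loop.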
    if XNN_UNLIKELY(n != 0) {
      const float vx = *x;

      const float vz = fabsf(vx);

      float vn = vz * vminus_log2e + vmagic_bias;
      const uint32_t ve = float_as_uint32(vn) << 17;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
      vn -= vmagic_bias;

      float vt = vn * vln2_hi + vz;
      vt = vn * vln2_lo + vt;

      float vp = vt * vc2;
      vp = vt - vp * vt;

      const float vy = vs - vs * vp;
      const float vd = vy + vone;

      float vf = vy / vd;
      if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
        vf = 0.0f;
      }
      if XNN_UNPREDICTABLE(vx > 0.0f) {
        vf = vone - vf;
      }

      *y = vf;
    }
  $else:
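    // Up to ${BATCH_TILE-1} elements may remain after the unrolled loop: process them one at a time.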
    if XNN_UNLIKELY(n != 0) {
      do {
        const float vx = *x++;

        const float vz = fabsf(vx);

        float vn = vz * vminus_log2e + vmagic_bias;
        const uint32_t ve = float_as_uint32(vn) << 17;
        const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
        const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
        vn -= vmagic_bias;

        float vt = vn * vln2_hi + vz;
        vt = vn * vln2_lo + vt;

        float vp = vt * vc2;
        vp = vt - vp * vt;

        const float vy = vs - vs * vp;
        const float vd = vy + vone;

        float vf = vy / vd;
        if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
          vf = 0.0f;
        }
        if XNN_UNPREDICTABLE(vx > 0.0f) {
          vf = vone - vf;
        }

        *y++ = vf;

        n -= sizeof(float);
      } while (n != 0);
    }
}