// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

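// Note: this is an XNNPACK microkernel template; `$`-prefixed lines and `${...}` expressions are
// expanded by the code generator. BATCH_TILE (a multiple of 4) selects how many floats the main
// loop processes per iteration, and SSE (2 or 4) selects an SSE2 or an SSE4.1 build of the kernel.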
$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

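// Computes sigmoid(x) = 1 / (1 + exp(-x)) over n bytes (n / sizeof(float) elements) of input x.
//
// Outline of the algorithm:
//   1. z := -|x|, so that exp(z) never overflows and one code path serves both signs of x.
//   2. exp(z) is evaluated as s * exp(t): n is z / log(2) rounded to a multiple of 1/64,
//      s := 2**n is reconstructed from the 64-entry table xnn_table_exp2minus_k_over_64 plus an
//      exponent adjustment, and exp(t) on the reduced argument t := z - n * log(2) is
//      approximated by the degree-2 polynomial 1 + t + c2 * t**2.
//   3. sigmoid(-|x|) := exp(z) / (exp(z) + 1), evaluated with a division.
//   4. For non-negative x the result is reflected: sigmoid(x) = 1 - sigmoid(-x).
// Results are flushed to zero where exp(z) would underflow (z below vdenorm_cutoff).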
$ISA = {2: "sse2", 4: "sse41"}[SSE]
void xnn_f32_sigmoid_ukernel__${ISA}_lut64_p2_div_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const void* params) XNN_DISABLE_TSAN
{
  assert(n % sizeof(float) == 0);

  const __m128 vsign_mask = _mm_set1_ps(-0.0f);
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p17f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p0f);
  const __m128i vindex_mask = _mm_set1_epi32(INT32_C(0x3F));
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.630000p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(0x1.BD0106p-13f);
  const __m128 vc2 = _mm_set1_ps(0x1.FFFF0Ap-2f);
  const __m128 vone = _mm_set1_ps(1.0f);
  const __m128 vdenorm_cutoff = _mm_set1_ps(-0x1.5D589Ep+6f);

  $if BATCH_TILE > 4:
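    // Main loop: process BATCH_TILE elements per iteration, unrolled in groups of 4.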
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
      $for N in range(4, BATCH_TILE, 4):
        const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

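      // z := -|x|: force the sign bit so the exponential below never overflows; non-negative
      // inputs are handled by reflecting the result at the end of the loop body.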
      $for N in range(0, BATCH_TILE, 4):
        const __m128 vz${ABC[N:N+4]} = _mm_or_ps(vx${ABC[N:N+4]}, vsign_mask);

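      // n := z / log(2), rounded to the nearest multiple of 1/64 with the "magic bias" trick:
      // after adding 0x1.8p17, the rounded value sits in the low mantissa bits of vn.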
      $for N in range(0, BATCH_TILE, 4):
        __m128 vn${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);

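      // Shift the fixed-point bits of n into position: integer-adding ve to the table entry
      // below reconstructs s := 2**n.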
      $for N in range(0, BATCH_TILE, 4):
        const __m128i ve${ABC[N:N+4]} = _mm_slli_epi32(_mm_castps_si128(vn${ABC[N:N+4]}), 17);

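      // Table index: the low 6 bits of n select one of the 64 entries; << 2 turns the index into
      // a byte offset.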
      $for N in range(0, BATCH_TILE, 4):
        const __m128i vidx${ABC[N:N+4]} = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn${ABC[N:N+4]}), vindex_mask), 2);

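      // SSE has no gather instruction, so the four table entries per vector are fetched with
      // scalar loads and reassembled (64-bit index extraction on x86-64, 16-bit elsewhere).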
      #if XNN_ARCH_X86_64
        $for N in range(0, BATCH_TILE, 4):
          const uint64_t vidx${ABC[N:N+2]} = (uint64_t) _mm_cvtsi128_si64(vidx${ABC[N:N+4]});
          $if SSE >= 4:
            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_extract_epi64(vidx${ABC[N:N+4]}, 1);
          $else:
            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx${ABC[N:N+4]}, vidx${ABC[N:N+4]}));
          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx${ABC[N:N+2]})));
          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx${ABC[N+2:N+4]})));
          $if SSE >= 4:
            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))), 1);
          $else:
            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))));
            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
          $if SSE >= 4:
            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))), 1);
          $else:
            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))));
            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
      #else  // !XNN_ARCH_X86_64
        $for N in range(0, BATCH_TILE, 4):
          const uint32_t vidx${ABC[N]} = (uint32_t) _mm_cvtsi128_si32(vidx${ABC[N:N+4]});
          const uint32_t vidx${ABC[N+1]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 2);
          const uint32_t vidx${ABC[N+2]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 4);
          const uint32_t vidx${ABC[N+3]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 6);
          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N]})));
          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N+2]})));
          $if SSE >= 4:
            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N+1]})), 1);
          $else:
            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N+1]})));
            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
          $if SSE >= 4:
            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N+3]})), 1);
          $else:
            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + vidx${ABC[N+3]})));
            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
      #endif  // XNN_ARCH_X86_64

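      // s := 2**n, reconstructed by integer addition of ve to the table value.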
      $for N in range(0, BATCH_TILE, 4):
        const __m128 vs${ABC[N:N+4]} = _mm_castsi128_ps(_mm_add_epi32(vl${ABC[N:N+4]}, ve${ABC[N:N+4]}));

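      // Subtract the magic bias to recover n as a float.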
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = _mm_sub_ps(vn${ABC[N:N+4]}, vmagic_bias);

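      // t := z - n * log(2), with log(2) split into high and low parts (Cody-Waite style) for accuracy.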
      $for N in range(0, BATCH_TILE, 4):
        __m128 vt${ABC[N:N+4]} = _mm_add_ps(vz${ABC[N:N+4]}, _mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_hi));

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = _mm_add_ps(vt${ABC[N:N+4]}, _mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_lo));

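      // p := t + c2 * t**2, so that s * (1 + p) approximates s * exp(t) = exp(z).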
      $for N in range(0, BATCH_TILE, 4):
        __m128 vp${ABC[N:N+4]} = _mm_mul_ps(vt${ABC[N:N+4]}, vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = _mm_add_ps(vt${ABC[N:N+4]}, _mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}));

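      // y := s + s * p = s * (1 + p) ~= exp(z).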
      $for N in range(0, BATCH_TILE, 4):
        const __m128 vy${ABC[N:N+4]} = _mm_add_ps(vs${ABC[N:N+4]}, _mm_mul_ps(vs${ABC[N:N+4]}, vp${ABC[N:N+4]}));

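      // f := y / (y + 1) = exp(z) / (exp(z) + 1) = sigmoid(z) = sigmoid(-|x|).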
      $for N in range(0, BATCH_TILE, 4):
        __m128 vf${ABC[N:N+4]} = _mm_div_ps(vy${ABC[N:N+4]}, _mm_add_ps(vy${ABC[N:N+4]}, vone));

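      // Flush the result to zero where z < vdenorm_cutoff, i.e. where exp(z) would be denormalized.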
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = _mm_andnot_ps(_mm_cmplt_ps(vz${ABC[N:N+4]}, vdenorm_cutoff), vf${ABC[N:N+4]});

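      // Reflect for non-negative inputs: sigmoid(x) = 1 - sigmoid(-x). The selection key is the
      // sign bit of x (blendv on SSE4.1, a mask from a signed integer compare on SSE2).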
      $if SSE >= 4:
        $for N in range(0, BATCH_TILE, 4):
          vf${ABC[N:N+4]} = _mm_blendv_ps(_mm_sub_ps(vone, vf${ABC[N:N+4]}), vf${ABC[N:N+4]}, vx${ABC[N:N+4]});
      $else:
        $for N in range(0, BATCH_TILE, 4):
          const __m128 vm${ABC[N:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));

        $for N in range(0, BATCH_TILE, 4):
          vf${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(vf${ABC[N:N+4]}, vm${ABC[N:N+4]}), _mm_andnot_ps(vm${ABC[N:N+4]}, _mm_sub_ps(vone, vf${ABC[N:N+4]})));

      _mm_storeu_ps(y, vf${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        _mm_storeu_ps(y + ${N}, vf${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
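  // Loop over full vectors of 4 elements (same steps as above, one vector at a time).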
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(x);
    x += 4;

    const __m128 vz = _mm_or_ps(vx, vsign_mask);

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 17);

    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
    #if XNN_ARCH_X86_64
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      $if SSE >= 4:
        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
      $else:
        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo)));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi)));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #else  // !XNN_ARCH_X86_64
      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_cvtsi128_si32(vidx))));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 4))));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 2))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 6))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #endif  // XNN_ARCH_X86_64
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);

    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(vz, _mm_mul_ps(vn, vminus_ln2_hi));
    vt = _mm_add_ps(vt, _mm_mul_ps(vn, vminus_ln2_lo));

    __m128 vp = _mm_mul_ps(vt, vc2);
    vp = _mm_add_ps(vt, _mm_mul_ps(vp, vt));

    const __m128 vy = _mm_add_ps(vs, _mm_mul_ps(vs, vp));

    __m128 vf = _mm_div_ps(vy, _mm_add_ps(vy, vone));
    vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
    $if SSE >= 4:
      vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
    $else:
      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
      vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));

    _mm_storeu_ps(y, vf);
    y += 4;
  }
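  // Handle the final 1-3 elements: compute on a full vector and store only the valid lanes.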
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);

    const __m128 vz = _mm_or_ps(vx, vsign_mask);

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    const __m128i ve = _mm_slli_epi32(_mm_castps_si128(vn), 17);

    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
    #if XNN_ARCH_X86_64
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      $if SSE >= 4:
        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
      $else:
        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo)));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi)));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #else  // !XNN_ARCH_X86_64
      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_cvtsi128_si32(vidx))));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 4))));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 2))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) _mm_extract_epi16(vidx, 6))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #endif  // XNN_ARCH_X86_64
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);

    const __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ve));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(vz, _mm_mul_ps(vn, vminus_ln2_hi));
    vt = _mm_add_ps(vt, _mm_mul_ps(vn, vminus_ln2_lo));

    __m128 vp = _mm_mul_ps(vt, vc2);
    vp = _mm_add_ps(vt, _mm_mul_ps(vp, vt));

    const __m128 vy = _mm_add_ps(vs, _mm_mul_ps(vs, vp));

    __m128 vf = _mm_div_ps(vy, _mm_add_ps(vy, vone));
    vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
    $if SSE >= 4:
      vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
    $else:
      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
      vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));

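    // Store the remaining 2 and/or 1 lanes of the result.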
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vf);
      vf = _mm_movehl_ps(vf, vf);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vf);
    }
  }
}