// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];

$ISA = {2: "sse2", 4: "sse41"}[SSE]
void xnn_f32_velu_ukernel__${ISA}_rr2_lut16_p3_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128 vprescale = _mm_load_ps(params->sse.prescale);
  const __m128 valpha = _mm_load_ps(params->sse.alpha);
  const __m128 vbeta = _mm_load_ps(params->sse.beta);

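  // Constants of the exp(z) approximation used on the negative side: exp(z) is reconstructed as
  // s * (1 + t + c2*t^2 + c3*t^3), where s = 2**n combines a 2**(-k/16) table entry with an exponent
  // adjustment, n is z / log(2) rounded to 4 fractional bits, and t = z - n * log(2) (two-step reduction).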
  const __m128 vsat_cutoff = _mm_set1_ps(-0x1.154246p+4f);
  const __m128 vmagic_bias = _mm_set1_ps(0x1.800000p19f);
  const __m128 vlog2e = _mm_set1_ps(0x1.715476p+0f);
  const __m128i vindex_mask = _mm_set1_epi32(0xF);
  const __m128 vminus_ln2_hi = _mm_set1_ps(-0x1.62E400p-1f);
  const __m128 vminus_ln2_lo = _mm_set1_ps(-0x1.7F7D1Cp-20f);
  const __m128 vc3 = _mm_set1_ps(0x1.55561Cp-3f);
  const __m128 vc2 = _mm_set1_ps(0x1.0001ECp-1f);
  const __m128 vone = _mm_set1_ps(1.0f);

  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
      $for N in range(4, BATCH_TILE, 4):
        __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

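      // z := prescale * x, clamped at the saturation cutoff below which expm1(z) == -1 in single precision.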
      $for N in range(0, BATCH_TILE, 4):
        const __m128 vz${ABC[N:N+4]} = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx${ABC[N:N+4]}, vprescale));

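      // n := z / log(2) rounded to 4 fractional bits via the magic-bias trick; the fixed-point result
      // stays encoded in the low mantissa bits of vn.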
      $for N in range(0, BATCH_TILE, 4):
        __m128 vn${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);

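      // The low 4 bits of the fixed-point n index the 2**(-k/16) table (scaled to a byte offset);
      // the bits above them, shifted into the exponent field, carry the integer part of n.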
      $for N in range(0, BATCH_TILE, 4):
        const __m128i vidx${ABC[N:N+4]} = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn${ABC[N:N+4]}), vindex_mask), 2);
        const __m128i ven${ABC[N:N+4]} = _mm_slli_epi32(_mm_castps_si128(vn${ABC[N:N+4]}), 19);

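      // Gather the four table entries per vector with scalar loads: 64-bit index extraction on x86-64,
      // 16-bit extracts of the 32-bit indices on 32-bit x86.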
      #if XNN_ARCH_X86_64
        $for N in range(0, BATCH_TILE, 4):
          const uint64_t vidx${ABC[N:N+2]} = (uint64_t) _mm_cvtsi128_si64(vidx${ABC[N:N+4]});
          $if SSE >= 4:
            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_extract_epi64(vidx${ABC[N:N+4]}, 1);
          $else:
            const uint64_t vidx${ABC[N+2:N+4]} = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx${ABC[N:N+4]}, vidx${ABC[N:N+4]}));
          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]})));
          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]})));
          $if SSE >= 4:
            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))), 1);
          $else:
            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32))));
            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
          $if SSE >= 4:
            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))), 1);
          $else:
            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32))));
            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
      #else  // !XNN_ARCH_X86_64
        $for N in range(0, BATCH_TILE, 4):
          const uint32_t vidx${ABC[N]} = (uint32_t) _mm_cvtsi128_si32(vidx${ABC[N:N+4]});
          const uint32_t vidx${ABC[N+1]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 2);
          const uint32_t vidx${ABC[N+2]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 4);
          const uint32_t vidx${ABC[N+3]} = (uint32_t) _mm_extract_epi16(vidx${ABC[N:N+4]}, 6);
          const __m128i vl${ABC[N]}   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N]})));
          const __m128i vl${ABC[N+2]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+2]})));
          $if SSE >= 4:
            const __m128i vl${ABC[N:N+2]} = _mm_insert_epi32(vl${ABC[N]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+1]})), 1);
          $else:
            const __m128i vl${ABC[N+1]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+1]})));
            const __m128i vl${ABC[N:N+2]} = _mm_unpacklo_epi32(vl${ABC[N]}, vl${ABC[N+1]});
          $if SSE >= 4:
            const __m128i vl${ABC[N+2:N+4]} = _mm_insert_epi32(vl${ABC[N+2]}, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+3]})), 1);
          $else:
            const __m128i vl${ABC[N+3]} = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + vidx${ABC[N+3]})));
            const __m128i vl${ABC[N+2:N+4]} = _mm_unpacklo_epi32(vl${ABC[N+2]}, vl${ABC[N+3]});
          const __m128i vl${ABC[N:N+4]} = _mm_unpacklo_epi64(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});
      #endif  // XNN_ARCH_X86_64

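      // Recover n by subtracting the magic bias, and reconstruct s := 2**n by adding the exponent bits
      // to the table entry in integer arithmetic.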
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = _mm_sub_ps(vn${ABC[N:N+4]}, vmagic_bias);
        __m128 vs${ABC[N:N+4]} = _mm_castsi128_ps(_mm_add_epi32(vl${ABC[N:N+4]}, ven${ABC[N:N+4]}));

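      // t := z - n * log(2), with log(2) split into hi and lo parts for extra accuracy.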
      $for N in range(0, BATCH_TILE, 4):
        __m128 vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});

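      // Evaluate the polynomial and assemble alpha * expm1(z) == alpha * (s * (1 + t + c2*t^2 + c3*t^3) - 1).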
      $for N in range(0, BATCH_TILE, 4):
        __m128 vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vc3, vt${ABC[N:N+4]}), vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = _mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = _mm_mul_ps(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
        vs${ABC[N:N+4]} = _mm_sub_ps(vs${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = _mm_add_ps(_mm_mul_ps(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const __m128 ve${ABC[N:N+4]} = _mm_mul_ps(_mm_add_ps(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);

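      // y := beta * x for x >= 0, alpha * expm1(prescale * x) for x < 0, selected per lane on the sign
      // of x (SSE4.1 blendv, or an SSE2 sign mask).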
      $for N in range(0, BATCH_TILE, 4):
        $if SSE < 4:
          const __m128 vm${ABC[N:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));
        vx${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vbeta);

      $for N in range(0, BATCH_TILE, 4):
        $if SSE >= 4:
          const __m128 vy${ABC[N:N+4]} = _mm_blendv_ps(vx${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});
        $else:
          const __m128 vy${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(ve${ABC[N:N+4]}, vm${ABC[N:N+4]}), _mm_andnot_ps(vm${ABC[N:N+4]}, vx${ABC[N:N+4]}));

      _mm_storeu_ps(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
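  // Process full groups of 4 elements with the same algorithm.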
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    __m128 vx = _mm_loadu_ps(x);
    x += 4;

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
    #if XNN_ARCH_X86_64
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      $if SSE >= 4:
        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
      $else:
        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #else  // !XNN_ARCH_X86_64
      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #endif  // XNN_ARCH_X86_64
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    $if SSE < 4:
      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
    vx = _mm_mul_ps(vx, vbeta);
    $if SSE >= 4:
      const __m128 vy = _mm_blendv_ps(vx, ve, vx);
    $else:
      const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));

    _mm_storeu_ps(y, vy);
    y += 4;
  }
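  // Handle the final 1-3 elements: compute a full vector of results and store only the valid lanes.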
  if XNN_UNLIKELY(n != 0) {
    __m128 vx = _mm_loadu_ps(x);

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);

    const __m128i ven = _mm_slli_epi32(_mm_castps_si128(vn), 19);
    const __m128i vidx = _mm_slli_epi32(_mm_and_si128(_mm_castps_si128(vn), vindex_mask), 2);
    #if XNN_ARCH_X86_64
      const uint64_t vidx_lo = (uint64_t) _mm_cvtsi128_si64(vidx);
      $if SSE >= 4:
        const uint64_t vidx_hi = (uint64_t) _mm_extract_epi64(vidx, 1);
      $else:
        const uint64_t vidx_hi = (uint64_t) _mm_cvtsi128_si64(_mm_unpackhi_epi64(vidx, vidx));
      const __m128i vl_ll   = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo)));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi)));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #else  // !XNN_ARCH_X86_64
      const __m128i vl_ll = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_cvtsi128_si32(vidx))));
      const __m128i vl_hl = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 4))));
      $if SSE >= 4:
        const __m128i vl_lo = _mm_insert_epi32(vl_ll, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))), 1);
      $else:
        const __m128i vl_lh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 2))));
        const __m128i vl_lo = _mm_unpacklo_epi32(vl_ll, vl_lh);
      $if SSE >= 4:
        const __m128i vl_hi = _mm_insert_epi32(vl_hl, *((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))), 1);
      $else:
        const __m128i vl_hh = _mm_cvtsi32_si128(*((const int*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) _mm_extract_epi16(vidx, 6))));
        const __m128i vl_hi = _mm_unpacklo_epi32(vl_hl, vl_hh);
    #endif  // XNN_ARCH_X86_64
    const __m128i vl = _mm_unpacklo_epi64(vl_lo, vl_hi);
    __m128 vs = _mm_castsi128_ps(_mm_add_epi32(vl, ven));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc3, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    $if SSE < 4:
      const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
    vx = _mm_mul_ps(vx, vbeta);
    $if SSE >= 4:
      __m128 vy = _mm_blendv_ps(vx, ve, vx);
    $else:
      __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));

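    // Store the low two lanes and/or a single lane, depending on how many elements remain.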
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}
