// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
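// The $-prefixed lines and ${...} substitutions in this file are directives
// for XNNPACK's template expander, which instantiates this microkernel for a
// concrete BATCH_TILE; ABC supplies the digit/letter suffixes used to name
// the unrolled vector variables.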
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


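// Computes the HardSwish activation, hswish(x) = x * min(max(x + 3, 0), 6) / 6,
// elementwise over a buffer of IEEE fp16 values. F16C widens each group of
// eight fp16 lanes to fp32 for the arithmetic and rounds every intermediate
// back to fp16, so results match a computation carried out in fp16 precision.
// Note that n is a byte count, not an element count.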
void xnn_f16_vhswish_ukernel__f16c_x${BATCH_TILE}(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);

  const uint16_t* x = (const uint16_t*) x_ptr;
  uint16_t* y = (uint16_t*) y_ptr;

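  // three and sixth are eight-lane fp32 broadcasts of 3.0 and ~1/6; six is
  // kept as raw fp16 bit patterns (loaded as integers) because the clamp
  // below operates directly on fp16 bits.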
  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vthree = _mm256_load_ps(params->avx.three);
  const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
  const __m128i vzero = _mm_setzero_si128();

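  // The clamp in the loops below runs as integer min/max on raw fp16 bit
  // patterns: after _mm_max_epi16 with zero every lane is a non-negative fp16
  // value, and non-negative fp16 bit patterns order the same way as signed
  // 16-bit integers, so _mm_min_epi16 against the bits of 6.0h is a correct
  // float min.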
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      // Load ${BATCH_TILE} fp16 elements and widen them to fp32.
      __m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
      $for N in range(8, BATCH_TILE, 8):
        __m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (x + ${N})));
      x += ${BATCH_TILE};

      // acc := round_to_fp16(x + 3) as raw bits; x := round_to_fp16(x * (1/6)).
      $for N in range(0, BATCH_TILE, 8):
        __m128i vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_add_ps(vx${ABC[N:N+8]}, vthree), _MM_FROUND_NO_EXC);
        vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx${ABC[N:N+8]}, vsixth), _MM_FROUND_NO_EXC));

      // acc := max(acc, 0.0h), computed on the fp16 bit patterns.
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_max_epi16(vacc${ABC[N:N+8]}, vzero);

      // acc := min(acc, 6.0h); valid because acc is now non-negative.
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_min_epi16(vacc${ABC[N:N+8]}, vsix);

      // result := round_to_fp16(acc * x) = hswish(x).
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[N:N+8]}), vx${ABC[N:N+8]}), _MM_FROUND_NO_EXC);

      _mm_storeu_si128((__m128i*) y, vacc${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (y + ${N}), vacc${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
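  // Vector loop over eight elements at a time; this is the whole main loop
  // when BATCH_TILE == 8, and mops up full 8-element groups left over from
  // the unrolled loop otherwise.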
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    x += 8;
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);
    _mm_storeu_si128((__m128i*) y, vacc);
    y += 8;
  }
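  // Final 1..7 elements: a full 16-byte vector is still loaded (the kernel is
  // declared XNN_OOB_READS, so reading past the end of x is permitted), but
  // only the valid lanes are written back, using 64-, 32-, and 16-bit stores
  // selected by the remaining byte count.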
  if XNN_UNLIKELY(n != 0) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);

    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) y, vacc);
      vacc = _mm_unpackhi_epi64(vacc, vacc);
      y += 4;
    }
    if (n & (2 * sizeof(uint16_t))) {
      *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vacc);
      vacc = _mm_srli_epi64(vacc, 32);
      y += 2;
    }
    if (n & (1 * sizeof(uint16_t))) {
      *y = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}
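
// For reference, a scalar model of the arithmetic above, not part of the
// generated kernel. It assumes params hold 3.0f, the fp32 value closest to
// 1/6, and the fp16 bit pattern of 6.0 (0x4600); the real values come from
// the params-initialization code, which lives outside this file. The scalar
// F16C intrinsics _cvtsh_ss/_cvtss_sh used here need the same -mf16c target
// as the kernel.
//
//   static uint16_t hswish_f16_reference(uint16_t x_bits) {
//     const float vx = _cvtsh_ss(x_bits);
//     // acc := round_to_fp16(x + 3), reinterpreting the bits for the clamp.
//     int16_t acc = (int16_t) _cvtss_sh(vx + 3.0f, _MM_FROUND_NO_EXC);
//     // xs := round_to_fp16(x * (1/6)), matching the vector code's rounding.
//     const float xs = _cvtsh_ss(_cvtss_sh(vx * 0.16666667f, _MM_FROUND_NO_EXC));
//     if (acc < 0) acc = 0;                      // max(acc, +0.0h) on fp16 bits
//     if (acc > (int16_t) 0x4600) acc = 0x4600;  // min(acc, 6.0h) on fp16 bits
//     return _cvtss_sh(_cvtsh_ss((uint16_t) acc) * xs, _MM_FROUND_NO_EXC);
//   }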