// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

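// Note: lines starting with `$` are directives for XNNPACK's template code
// generator, not C. BATCH_TILE is the number of elements converted per
// main-loop iteration (a multiple of 8, at least 8); ABC supplies suffixes
// used to name the per-group vector variables below.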
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

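// Select the element type and the SSE4.1 byte-extension intrinsic for the
// datatype: QS8 sign-extends int8_t lanes, QU8 zero-extends uint8_t lanes.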
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_CVTEPX8_EPI32 = {"QS8": "_mm_cvtepi8_epi32", "QU8": "_mm_cvtepu8_epi32"}[DATATYPE]
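// Converts ${DATATYPE}-quantized 8-bit inputs to single precision:
//   y[i] = (float) ((int32_t) x[i] - zero_point) * scale
// n is given in bytes, which for 8-bit inputs equals the element count.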
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__avx_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

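  // params->avx is expected to provide the negated zero point broadcast
  // across all int32 lanes and the scale broadcast across all float lanes,
  // so dequantization reduces to y = scale * (x + (-zero_point)).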
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
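    // Load ${BATCH_TILE} input bytes in groups of 4 and extend each group to
    // 32-bit lanes (sign-extended for QS8, zero-extended for QU8).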
    __m128i vx${ABC[0:4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    $for N in range(4, BATCH_TILE, 4):
      __m128i vx${ABC[N:N+4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x + ${N})));
    x += ${BATCH_TILE};

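    // Subtract the zero point by adding its precomputed negation.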
    $for N in range(0, BATCH_TILE, 4):
      vx${ABC[N:N+4]} = _mm_add_epi32(vx${ABC[N:N+4]}, vminus_zero_point);

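    // AVX1 lacks 256-bit integer arithmetic, so the integer work is done in
    // 128-bit halves; pairs are merged into 256-bit vectors only now, for the
    // int32 -> f32 conversion.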
    $for N in range(0, BATCH_TILE, 8):
      const __m256i vx${ABC[N:N+8]} = _mm256_insertf128_si256(_mm256_castsi128_si256(vx${ABC[N:N+4]}), vx${ABC[N+4:N+8]}, 1);

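    // Convert the zero-point-adjusted integers to single precision.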
    $for N in range(0, BATCH_TILE, 8):
      __m256 vy${ABC[N:N+8]} = _mm256_cvtepi32_ps(vx${ABC[N:N+8]});

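    // Apply the dequantization scale.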
    $for N in range(0, BATCH_TILE, 8):
      vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vscale);

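    // Write out ${BATCH_TILE} converted floats with unaligned stores.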
    _mm256_storeu_ps(y, vy${ABC[0:8]});
    $for N in range(8, BATCH_TILE, 8):
      _mm256_storeu_ps(y + ${N}, vy${ABC[N:N+8]});
    y += ${BATCH_TILE};
  }
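  // Convert any remaining full groups of 4 elements with 128-bit vectors.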
  for (; n >= 4 * sizeof(${XINT8_T}); n -= 4 * sizeof(${XINT8_T})) {
    __m128i vx = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    x += 4;

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));

    _mm_storeu_ps(y, vy);
    y += 4;
  }
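  // Tail of 1-3 elements: unaligned_load_s32 still reads a full 4 bytes,
  // which XNN_OOB_READS declares safe; only the valid lanes are stored.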
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 3 * sizeof(${XINT8_T}));

    __m128i vx = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));

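    // Store the low two lanes if at least 2 elements remain, then shift the
    // upper lanes down for a possible final single-lane store.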
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      _mm_store_ss(y, vy);
    }
  }
}
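
// A minimal usage sketch, not part of the generated kernel. It assumes the
// QS8 x32 instantiation of this template and an initializer in the style of
// xnn_init_qs8_f32_cvt_avx_params(); the exact helper names and signatures
// here are assumptions and depend on the XNNPACK version.
//
//   #include <xnnpack/params-init.h>
//   #include <xnnpack/vcvt.h>
//
//   void dequantize(const int8_t* x, float* y, size_t batch,
//                   float scale, int8_t zero_point) {
//     union xnn_qs8_f32_cvt_params params;
//     // Assumed signature: broadcasts scale and -zero_point into params.
//     xnn_init_qs8_f32_cvt_avx_params(&params, scale, zero_point);
//     // First argument is in bytes; for int8_t inputs this equals the
//     // element count.
//     xnn_qs8_f32_vcvt_ukernel__avx_x32(batch * sizeof(int8_t), x, y, &params);
//   }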