// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/avx.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


void xnn_qs8_f32_vcvt_ukernel__avx_x8(
    size_t n,
    const int8_t* x,
    float* y,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: convert 8 elements per iteration.
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    // Sign-extend two groups of 4 int8 values to int32.
    __m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    __m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(x + 4)));
    x += 8;

    // Subtract the zero point (stored pre-negated in the params).
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);

    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);

    // Convert to float and apply the scale.
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);

    vy01234567 = _mm256_mul_ps(vy01234567, vscale);

    _mm256_storeu_ps(y, vy01234567);
    y += 8;
  }
  // Remainder loop: convert 4 elements per iteration.
  for (; n >= 4 * sizeof(int8_t); n -= 4 * sizeof(int8_t)) {
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    x += 4;

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));

    _mm_storeu_ps(y, vy);
    y += 4;
  }
  // Tail: handle the last 1-3 elements.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 3 * sizeof(int8_t));

    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));

    if (n & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}