// Auto-generated file. Do not edit!
//   Template: src/f32-qs8-vcvt/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_f32_qu8_vcvt_ukernel__avx2_x64(
    size_t n,
    const float* x,
    uint8_t* y,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

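  // Load the quantization parameters. The params struct is expected to hold
  // each value already replicated across all vector lanes.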
  const __m256 vscale = _mm256_load_ps(params->avx2.scale);
  const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
  const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
  const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);

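  // Main loop: convert 64 elements per iteration.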
  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
    __m256 vx01 = _mm256_loadu_ps(x);
    __m256 vx23 = _mm256_loadu_ps(x + 8);
    __m256 vx45 = _mm256_loadu_ps(x + 16);
    __m256 vx67 = _mm256_loadu_ps(x + 24);
    __m256 vx89 = _mm256_loadu_ps(x + 32);
    __m256 vxAB = _mm256_loadu_ps(x + 40);
    __m256 vxCD = _mm256_loadu_ps(x + 48);
    __m256 vxEF = _mm256_loadu_ps(x + 56);
    x += 64;

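    // Multiply by the scale factor to map the inputs into quantized units.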
    vx01 = _mm256_mul_ps(vx01, vscale);
    vx23 = _mm256_mul_ps(vx23, vscale);
    vx45 = _mm256_mul_ps(vx45, vscale);
    vx67 = _mm256_mul_ps(vx67, vscale);
    vx89 = _mm256_mul_ps(vx89, vscale);
    vxAB = _mm256_mul_ps(vxAB, vscale);
    vxCD = _mm256_mul_ps(vxCD, vscale);
    vxEF = _mm256_mul_ps(vxEF, vscale);

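    // Clamp above at (output_max - zero_point) so that after the zero point
    // is added the result cannot exceed output_max.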
    vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
    vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
    vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
    vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
    vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
    vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);
    vxCD = _mm256_min_ps(vxCD, voutput_max_less_zero_point);
    vxEF = _mm256_min_ps(vxEF, voutput_max_less_zero_point);

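    // Convert to 32-bit integers, rounding to nearest-even under the default
    // MXCSR rounding mode.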
    const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
    const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
    const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
    const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
    const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
    const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);
    const __m256i vaccCD = _mm256_cvtps_epi32(vxCD);
    const __m256i vaccEF = _mm256_cvtps_epi32(vxEF);

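    // Pack pairs of vectors to 16 bits with signed saturation. AVX2 packs
    // operate within 128-bit lanes, so the 64-bit groups land in 0-2-1-3
    // order, as the variable names record.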
    __m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
    __m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
    __m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);
    __m256i vaccCEDF = _mm256_packs_epi32(vaccCD, vaccEF);

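    // Add the output zero point with saturating 16-bit arithmetic.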
    vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
    vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
    vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);
    vaccCEDF = _mm256_adds_epi16(vaccCEDF, voutput_zero_point);

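    // Pack to unsigned 8 bits with saturation; the 32-bit groups now sit in
    // 0-2-4-6-1-3-5-7 order.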
    const __m256i vy02461357 = _mm256_packus_epi16(vacc0213, vacc4657);
    const __m256i vy8ACE9BDF = _mm256_packus_epi16(vacc8A9B, vaccCEDF);

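    // Restore sequential order with a cross-lane permute of 32-bit groups.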
    __m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
    __m256i vy89ABCDEF = _mm256_permutevar8x32_epi32(vy8ACE9BDF, vshuffle_mask);

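    // Clamp below at output_min.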
    vy01234567 = _mm256_max_epu8(vy01234567, voutput_min);
    vy89ABCDEF = _mm256_max_epu8(vy89ABCDEF, voutput_min);

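    // Write out the 64 converted bytes.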
    _mm256_storeu_si256((__m256i*) y, vy01234567);
    _mm256_storeu_si256((__m256i*) (y + 32), vy89ABCDEF);
    y += 64;
  }
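  // Process remaining full vectors of 8 elements.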
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
    x += 8;

    const __m256i vacc = _mm256_cvtps_epi32(vx);

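    // Narrow to 8 bytes using the two 128-bit halves of the accumulator.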
    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));

    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
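  // Handle the last 1-7 elements with a masked load.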
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
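    // mask_table is laid out as -1 entries followed by 0 entries, with the
    // first 0 at index 7; loading 8 entries starting n bytes before
    // &mask_table[7] enables exactly the first n / sizeof(float) lanes.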
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));

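    // Store the low n / sizeof(float) bytes: 4, then 2, then 1 at a time,
    // shifting the already-stored bytes out of vy as we go.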
    if (n & (4 * sizeof(float))) {
      _mm_storeu_si32(y, vy);
      y += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    if (n & (2 * sizeof(float))) {
      _mm_storeu_si16(y, vy);
      y += 2;
      vy = _mm_srli_epi32(vy, 16);
    }
    if (n & (1 * sizeof(float))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}