// Auto-generated file. Do not edit!
//   Template: src/f32-qs8-vcvt/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_f32_qu8_vcvt_ukernel__avx2_x48(
    size_t n,
    const float* x,
    uint8_t* y,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

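  // Quantization parameters prepared by the setup code: a combined scale
  // factor, the output zero point, and the clamping bounds. Note that
  // output_max_less_zero_point is (output_max - zero_point), so the upper
  // clamp can be applied in the float domain, before the zero point is added.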
  const __m256 vscale = _mm256_load_ps(params->avx2.scale);
  const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
  const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
  const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);

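  // Main loop: convert 48 floats (six __m256 registers) per iteration.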
  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
    __m256 vx01 = _mm256_loadu_ps(x);
    __m256 vx23 = _mm256_loadu_ps(x + 8);
    __m256 vx45 = _mm256_loadu_ps(x + 16);
    __m256 vx67 = _mm256_loadu_ps(x + 24);
    __m256 vx89 = _mm256_loadu_ps(x + 32);
    __m256 vxAB = _mm256_loadu_ps(x + 40);
    x += 48;

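    // Multiply by the quantization scale; rounding is deferred to the
    // float-to-integer conversion below.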
    vx01 = _mm256_mul_ps(vx01, vscale);
    vx23 = _mm256_mul_ps(vx23, vscale);
    vx45 = _mm256_mul_ps(vx45, vscale);
    vx67 = _mm256_mul_ps(vx67, vscale);
    vx89 = _mm256_mul_ps(vx89, vscale);
    vxAB = _mm256_mul_ps(vxAB, vscale);

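    // Clamp from above at (output_max - zero_point) while still in the float
    // domain, so that the result cannot exceed output_max once the zero point
    // is added in the integer domain.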
    vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
    vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
    vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
    vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
    vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
    vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);

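    // Convert to 32-bit integers; _mm256_cvtps_epi32 rounds to nearest-even
    // under the default MXCSR rounding mode.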
    const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
    const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
    const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
    const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
    const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
    const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);

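    // Pack pairs of int32 vectors to int16 with signed saturation. AVX2 packs
    // within 128-bit lanes, so the 4-element groups come out interleaved (e.g.
    // vacc0213 holds groups 0, 2, 1, 3); the shuffles below undo this.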
    __m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
    __m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
    __m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);

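    // Add the output zero point with signed saturation in the int16 domain.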
    vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
    vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
    vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);

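    // Pack int16 down to uint8 with unsigned saturation (negative values
    // become 0; the actual lower bound is applied by the max below).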
    const __m256i vy02461357 = _mm256_packus_epi16(vacc0213, vacc4657);
    const __m128i vy8A9B = _mm_packus_epi16(_mm256_castsi256_si128(vacc8A9B), _mm256_extracti128_si256(vacc8A9B, 1));

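    // Restore sequential byte order: a cross-lane dword permute for the
    // 32-byte vector and a dword shuffle for the 16-byte vector.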
    __m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
    __m128i vy89AB = _mm_shuffle_epi32(vy8A9B, _MM_SHUFFLE(3, 1, 2, 0));

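    // Clamp from below at the output minimum.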
    vy01234567 = _mm256_max_epu8(vy01234567, voutput_min);
    vy89AB = _mm_max_epu8(vy89AB, _mm256_castsi256_si128(voutput_min));

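    // Store all 48 converted bytes.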
    _mm256_storeu_si256((__m256i*) y, vy01234567);
    _mm_storeu_si128((__m128i*) (y + 32), vy89AB);
    y += 48;
  }
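  // Tail loop: convert 8 floats at a time with the same
  // scale/clamp/round/pack sequence on a single register.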
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
    x += 8;

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));

    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
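  // Final 1-7 elements: a masked load keeps the out-of-bounds lanes at zero.
  // The mask is read at a byte offset of -n from &mask_table[7], which yields
  // a mask with exactly n / sizeof(float) leading lanes enabled.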
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));

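    // Store the remaining 1-7 bytes piecewise: 4, 2, and/or 1 byte at a time,
    // shifting the already-stored bytes out of the register after each step.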
    if (n & (4 * sizeof(float))) {
      *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vy);
      y += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    if (n & (2 * sizeof(float))) {
      *((uint16_t*) y) = (uint16_t) _mm_extract_epi16(vy, 0);
      y += 2;
      vy = _mm_srli_epi32(vy, 16);
    }
    if (n & (1 * sizeof(float))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}