// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <emmintrin.h>

#include <fp16/bitcasts.h>

#include <xnnpack/requantization-stubs.h>


void xnn_qs8_requantize_gemmlowp__sse2(
    size_t n,
    const int32_t* input,
    float scale,
    int8_t zero_point,
    int8_t qmin,
    int8_t qmax,
    int8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  // Compute requantization parameters.
  const uint32_t scale_bits = fp32_to_bits(scale);

  // Multiplier is in [0x40000000, 0x7FFFFF80] range.
  const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));

  // Shift is in [0, 31] range.
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
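  // Together, multiplier and shift represent the scale exactly: scale = multiplier * 2**(-31 - shift).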

  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short) zero_point);
  const __m128i vqmin = _mm_set1_epi16((short) qmin);
  const __m128i vqmax = _mm_set1_epi16((short) qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int) shift);
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
  const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
  const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
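  // Each iteration requantizes 16 int32 values into 16 int8 values.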
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*) input);
    const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
    input += 16;

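    // SSE2 has no signed 32x32->64-bit multiply, so multiply absolute values with
    // PMULUDQ and restore the signs of the 64-bit products afterwards.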
    const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
    const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
    const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);

    const __m128i x_abs = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
    const __m128i y_abs = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
    const __m128i z_abs = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
    const __m128i w_abs = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);

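    // PMULUDQ multiplies only the even (0 and 2) 32-bit lanes, so swap adjacent lanes
    // to bring the odd lanes into even positions for a second multiplication.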
    const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));

    const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
    const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
    const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
    const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);

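    // Broadcast each sign mask across its 64-bit lane and restore the product sign
    // via (p XOR mask) - mask, which negates p when the mask is all ones.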
    const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));

    const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
    const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
    const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
    const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);

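    // Add the Q31 rounding bias (2**30) so the later shift by 31 rounds to nearest.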
    const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
    const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
    const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
    const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);

    const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
    const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
    const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
    const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);

    const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
    const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
    const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
    const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);

    const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
    const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
    const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
    const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);

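    // Shift the rounded 64-bit products right by 31; only the low 32 bits of each lane,
    // which hold the rounded Q31 result, are used below.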
    const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
    const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
    const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
    const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
    const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
    const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
    const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
    const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);

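    // Interleave the even- and odd-lane results back into the original element order.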
    const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));

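    // Apply a rounding arithmetic right shift by 'shift' (gemmlowp-style rounding divide
    // by a power of two): keep the shifted-out remainder and add 1 to the shifted value
    // whenever the remainder exceeds the rounding threshold.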
    const __m128i x_remainder =
        _mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
    const __m128i y_remainder =
        _mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
    const __m128i z_remainder =
        _mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
    const __m128i w_remainder =
        _mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));

    const __m128i x_scaled =
        _mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
    const __m128i y_scaled =
        _mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
    const __m128i z_scaled =
        _mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
    const __m128i w_scaled =
        _mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));

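    // Pack to 16 bits with signed saturation, add the zero point, clamp to [qmin, qmax],
    // then pack to 8 bits with signed saturation.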
    const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xy_clamped = _mm_max_epi16(_mm_min_epi16(xy_packed, vqmax), vqmin);
    const __m128i zw_clamped = _mm_max_epi16(_mm_min_epi16(zw_packed, vqmax), vqmin);
    const __m128i xyzw_clamped = _mm_packs_epi16(xy_clamped, zw_clamped);

    // 16x PSHUFD
    // 4x SHUFPS
    // 8x PMULUDQ
    // 8x PXOR (setzero)
    // 12x PXOR
    // 4x PAND
    // 8x PADDQ
    // 4x PADDD
    // 8x PSUBQ
    // 8x PSUBD
    // 8x PSRLQ (immediate)
    // 4x PSRAD (register)
    // 12x PCMPGTD
    // 2x PACKSSDW
    // 2x PADDSW
    // 2x PMAXSW
    // 2x PMINSW
    // 1x PACKSSWB
    // ---------------------
    // 113 instructions total

    _mm_storeu_si128((__m128i*) output, xyzw_clamped);
    output += 16;
  }
}