// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <emmintrin.h>

#include <fp16/bitcasts.h>

#include <xnnpack/requantization-stubs.h>


void xnn_qu8_requantize_precise__sse2(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

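  // Split the positive FP32 scale into a 24-bit integer multiplier (its mantissa with the
  // implicit leading bit restored) and a right shift, so that scale == multiplier * 2**(-shift).
  // Because 2**-32 <= scale < 1.0, the shift is in [24, 56). Requantization then reduces to a
  // 64-bit fixed-point multiplication followed by a rounding right shift.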
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
  const uint64_t rounding = UINT64_C(1) << (shift - 1);

  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
  const __m128i vqmin = _mm_set1_epi8((char) qmin);
  const __m128i vqmax = _mm_set1_epi8((char) qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int) shift);
  const __m128i vrounding = _mm_set1_epi64x(rounding);
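  // Each iteration processes 16 int32 inputs and produces 16 uint8 outputs.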
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*) input);
    const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
    input += 16;

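    // Compute lane-wise masks that are all-ones for negative inputs and all-zeros otherwise.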
    const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
    const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
    const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);

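    // Compute absolute values with the two's-complement identity abs(v) == (v ^ mask) - mask.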
    const __m128i x_abs0123 = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
    const __m128i y_abs0123 = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
    const __m128i z_abs0123 = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
    const __m128i w_abs0123 = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);

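    // Swap adjacent 32-bit lanes (1<->0, 3<->2) so the odd-indexed elements land in the even
    // lanes, which are the lanes that PMULUDQ (_mm_mul_epu32) reads.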
    const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));

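    // 64-bit products of the even-indexed elements (0 and 2) with the multiplier.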
    const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
    const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
    const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
    const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);

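    // 64-bit products of the odd-indexed elements (1 and 3), taken from the swapped vectors.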
    const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
    const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
    const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
    const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);

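    // Add the rounding constant (2**(shift - 1)) and shift right to complete the fixed-point
    // scaling; after the shift, each 64-bit lane fits in 32 bits.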
    const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
    const __m128i x_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
    const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
    const __m128i y_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
    const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
    const __m128i z_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
    const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
    const __m128i w_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);

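    // Gather the low 32 bits of the four 64-bit results; SHUFPS leaves them in 0-2-1-3 order.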
    const __m128i x_abs_scaled0213 = _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(x_abs_scaled02), _mm_castsi128_ps(x_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_abs_scaled0213 = _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(y_abs_scaled02), _mm_castsi128_ps(y_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_abs_scaled0213 = _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(z_abs_scaled02), _mm_castsi128_ps(z_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i w_abs_scaled0213 = _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(w_abs_scaled02), _mm_castsi128_ps(w_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));

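    // Shuffle back from 0-2-1-3 order to the natural 0-1-2-3 element order.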
    const __m128i x_abs_scaled = _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_abs_scaled = _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_abs_scaled = _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_abs_scaled = _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));

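    // Restore the signs of the scaled values using the negation masks computed earlier.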
    const __m128i x_scaled = _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask);
    const __m128i y_scaled = _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask);
    const __m128i z_scaled = _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask);
    const __m128i w_scaled = _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask);

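    // Pack to int16 with signed saturation, add the zero point with saturation, pack to uint8
    // with unsigned saturation, and clamp the result to [qmin, qmax].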
    const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);

    // 4x PXOR (setzero)
    // 4x PCMPGTD
    // 8x PSUBD
    // 8x PXOR
    // 8x PSHUFD
    // 8x PMULUDQ
    // 8x PSRLQ
    // 8x PADDQ
    // 4x SHUFPS
    // 2x PACKSSDW
    // 1x PACKUSWB
    // 2x PADDSW
    // 1x PMAXUB
    // 1x PMINUB
    // ---------------------
    // 67 instructions total

    _mm_storeu_si128((__m128i*) output, xyzw_clamped);
    output += 16;
  }
}