// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <limits.h>
#include <stdint.h>

#include <immintrin.h>

// The code below is adapted from Google's gemmlowp library.
// It is only used in XNNPACK unit tests and comparative benchmarks,
// but not the library itself.
//
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

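// Rounding division of each signed 32-bit lane of x by 2**exponent, rounding
// to nearest with ties away from zero (cf. gemmlowp's RoundingDivideByPOT).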
static inline __m128i gemmlowp_sse_rdivbypo2_s32(__m128i x, int exponent) {
  const __m128i mask = _mm_set1_epi32((int32_t) ((UINT64_C(1) << exponent) - UINT64_C(1)));
  const __m128i remainder = _mm_and_si128(x, mask);
  // The rounding threshold is one below half the divisor for non-negative x
  // and exactly half for negative x, so that ties round away from zero.
  const __m128i threshold = _mm_sub_epi32(
      _mm_srli_epi32(mask, 1), _mm_cmplt_epi32(x, _mm_setzero_si128()));
  // Arithmetic shift, plus 1 in the lanes where remainder > threshold
  // (the comparison yields all-ones, which the subtraction turns into +1).
  return _mm_sub_epi32(
      _mm_sra_epi32(x, _mm_cvtsi32_si128(exponent)),
      _mm_cmpgt_epi32(remainder, threshold));
}

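// Signed 32x32 -> 64-bit multiplication of lanes 0 and 2 of a and b, matching
// the semantics of the SSE4.1 _mm_mul_epi32 instruction on older targets.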
static inline __m128i gemmlowp_sse_mul_s32(__m128i a, __m128i b) {
#ifdef __SSE4_1__
  return _mm_mul_epi32(a, b);
#else
  __m128i sign, zero, mul_us, a_neg, b_neg, mul_us_neg;
  sign = _mm_xor_si128(a, b);
  sign = _mm_srai_epi32(sign, 31); // broadcast each product's sign bit to all
                                   // 32 bits: all ones if negative, all zeros
                                   // if non-negative
  sign = _mm_shuffle_epi32(
      sign,
      _MM_SHUFFLE(2, 2, 0, 0)); // duplicate the sign masks of lanes 0 and 2
                                // into lanes 1 and 3 to cover the 64-bit
                                // products
  zero = _mm_setzero_si128();
#ifdef __SSSE3__
  a_neg = _mm_abs_epi32(a); // absolute values of a and b
  b_neg = _mm_abs_epi32(b);
#else  // pre-SSSE3
  // Two's-complement absolute value: (x ^ m) - m, where m = (x < 0) ? -1 : 0.
  const __m128i a_neg_mask = _mm_cmplt_epi32(a, zero);
  a_neg = _mm_sub_epi32(_mm_xor_si128(a, a_neg_mask), a_neg_mask);
  const __m128i b_neg_mask = _mm_cmplt_epi32(b, zero);
  b_neg = _mm_sub_epi32(_mm_xor_si128(b, b_neg_mask), b_neg_mask);
#endif  // pre-SSSE3
  mul_us = _mm_mul_epu32(a_neg, b_neg); // unsigned multiply of lanes 0 and 2;
                                        // each product is a full 64-bit result
  mul_us_neg = _mm_sub_epi64(zero, mul_us); // negated products
  mul_us_neg = _mm_and_si128(sign, mul_us_neg);
  mul_us = _mm_andnot_si128(sign, mul_us);
  return _mm_or_si128(mul_us, mul_us_neg); // select the negated product where
                                           // the signs of a and b differ
#endif
}

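// Saturating rounding doubling high multiply, the SSE counterpart of the NEON
// VQRDMULH instruction: each 32-bit lane computes
// (2 * (int64_t) a * b + (INT64_C(1) << 31)) >> 32.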
static inline __m128i gemmlowp_sse_vqrdmulh_s32(__m128i a, __m128i b) {
  // saturation can only happen if a == b == INT32_MIN
  const __m128i min = _mm_set1_epi32(INT32_MIN);
  const __m128i saturation_mask =
      _mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_cmpeq_epi32(a, min));

  // a = a0 | a1 | a2 | a3
  // b = b0 | b1 | b2 | b3
  const __m128i a0_a2 = a;
  const __m128i a1_a3 = _mm_srli_si128(a, 4);
  const __m128i b0_b2 = b;
  const __m128i b1_b3 = _mm_srli_si128(b, 4);

  const __m128i a0b0_a2b2 = gemmlowp_sse_mul_s32(a0_a2, b0_b2);
  const __m128i a1b1_a3b3 = gemmlowp_sse_mul_s32(a1_a3, b1_b3);

  // do the rounding, accounting for the doubling below
  const __m128i nudge = _mm_set1_epi64x(1 << 30);
  const __m128i a0b0_a2b2_rounded = _mm_add_epi64(a0b0_a2b2, nudge);
  const __m128i a1b1_a3b3_rounded = _mm_add_epi64(a1b1_a3b3, nudge);

  // do the doubling
  const __m128i a0b0_a2b2_rounded_2x = _mm_slli_epi64(a0b0_a2b2_rounded, 1);
  const __m128i a1b1_a3b3_rounded_2x = _mm_slli_epi64(a1b1_a3b3_rounded, 1);

// get the high part of the products
#ifdef __SSE4_1__
  const __m128i result = _mm_blend_epi16(
      _mm_srli_epi64(a0b0_a2b2_rounded_2x, 32), a1b1_a3b3_rounded_2x, 0xCC);
#else
  const __m128i result0213 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(a0b0_a2b2_rounded_2x),
      _mm_castsi128_ps(a1b1_a3b3_rounded_2x),
      _MM_SHUFFLE(3, 1, 3, 1)));
  const __m128i result = _mm_shuffle_epi32(result0213, _MM_SHUFFLE(3, 1, 2, 0));
#endif

// saturate the lanes that overflowed
#ifdef __SSE4_1__
  const __m128i saturated_result = _mm_blendv_epi8(result, min, saturation_mask);
#else
  const __m128i saturated_result = _mm_or_si128(
      _mm_and_si128(saturation_mask, min),
      _mm_andnot_si128(saturation_mask, result));
#endif
  return saturated_result;
}
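
// Illustrative usage sketch: gemmlowp-style requantization scales an int32
// accumulator by a fixed-point multiplier and a power-of-two shift (the names
// acc, multiplier, and shift are placeholders, not part of this header):
//
//   const __m128i scaled = gemmlowp_sse_rdivbypo2_s32(
//       gemmlowp_sse_vqrdmulh_s32(acc, _mm_set1_epi32(multiplier)), shift);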