// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// fixedpoint_SSE.h: optimized SSE specializations of the templates
// in fixedpoint.h.

#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_
#define GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_

#include <smmintrin.h>
#include "fixedpoint.h"

namespace gemmlowp {

// SSE intrinsics are not finely typed: there is a single __m128i vector
// type that does not distinguish between "int32x4" and "int16x8" use
// cases, unlike the NEON equivalents. Because we initially focused on
// int32x4, we specialized these fixedpoint templates directly for __m128i,
// hardcoding int32x4 semantics and leaving no room for int16x8 semantics.
// We amend that by adding a separate data type, int16x8_m128i, that wraps
// __m128i while being a distinct type.
struct int16x8_m128i {
  __m128i v;
};

// Keep int16x8_m128i trivially constructible/destructible and provide an
// easily optimized helper function for wrapping a raw __m128i.
inline int16x8_m128i to_int16x8_m128i(__m128i w) {
  int16x8_m128i r;
  r.v = w;
  return r;
}
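
// For illustration only (a sketch, not used by the library): with the wrapper
// type, the same raw __m128i data can be routed to either the int32x4 or the
// int16x8 specialization purely by how it is typed at the call site, e.g.
// using the Add specializations defined below:
//   __m128i x = _mm_set1_epi32(1);
//   __m128i sum32 = Add(x, x);                              // int32x4 semantics
//   int16x8_m128i sum16 =
//       Add(to_int16x8_m128i(x), to_int16x8_m128i(x));      // int16x8 semantics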

template <>
struct FixedPointRawTypeTraits<__m128i> {
  typedef std::int32_t ScalarRawType;
  static constexpr int kLanes = 4;
};

template <>
struct FixedPointRawTypeTraits<int16x8_m128i> {
  typedef std::int16_t ScalarRawType;
  static constexpr int kLanes = 8;
};

template <>
inline __m128i BitAnd(__m128i a, __m128i b) {
  return _mm_and_si128(a, b);
}

template <>
inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_and_si128(a.v, b.v));
}

template <>
inline __m128i BitOr(__m128i a, __m128i b) {
  return _mm_or_si128(a, b);
}

template <>
inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_or_si128(a.v, b.v));
}

template <>
inline __m128i BitXor(__m128i a, __m128i b) {
  return _mm_xor_si128(a, b);
}

template <>
inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_xor_si128(a.v, b.v));
}

template <>
inline __m128i BitNot(__m128i a) {
  return _mm_andnot_si128(a, _mm_set1_epi32(-1));
}

template <>
inline int16x8_m128i BitNot(int16x8_m128i a) {
  return to_int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
}

template <>
inline __m128i Add(__m128i a, __m128i b) {
  return _mm_add_epi32(a, b);
}

template <>
inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_add_epi16(a.v, b.v));
}

template <>
inline __m128i Mul(__m128i a, __m128i b) {
  return _mm_mullo_epi32(a, b);
}

template <>
inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
}

template <>
inline __m128i Sub(__m128i a, __m128i b) {
  return _mm_sub_epi32(a, b);
}

template <>
inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_sub_epi16(a.v, b.v));
}

template <>
inline __m128i Neg(__m128i a) {
  return _mm_sign_epi32(a, _mm_set1_epi32(-1));
}

template <>
inline int16x8_m128i Neg(int16x8_m128i a) {
  return to_int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
}

template <>
inline __m128i ShiftLeft(__m128i a, int offset) {
  return _mm_slli_epi32(a, offset);
}

template <>
inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) {
  return to_int16x8_m128i(_mm_slli_epi16(a.v, offset));
}

template <>
inline __m128i ShiftRight(__m128i a, int offset) {
  return _mm_srai_epi32(a, offset);
}

template <>
inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) {
  return to_int16x8_m128i(_mm_srai_epi16(a.v, offset));
}

template <>
inline __m128i SelectUsingMask(__m128i if_mask, __m128i then_val,
                               __m128i else_val) {
  // borrowed from Intel's arm_neon_sse.h header.
  return _mm_or_si128(_mm_and_si128(if_mask, then_val),
                      _mm_andnot_si128(if_mask, else_val));
}

template <>
inline int16x8_m128i SelectUsingMask(int16x8_m128i if_mask,
                                     int16x8_m128i then_val,
                                     int16x8_m128i else_val) {
  // borrowed from Intel's arm_neon_sse.h header.
  return to_int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
}

template <>
inline __m128i MaskIfEqual(__m128i a, __m128i b) {
  return _mm_cmpeq_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfNotEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfEqual(a, b));
}

template <>
inline int16x8_m128i MaskIfNotEqual(int16x8_m128i a, int16x8_m128i b) {
  return BitNot(MaskIfEqual(a, b));
}

template <>
inline __m128i MaskIfZero(__m128i a) {
  return MaskIfEqual(a, _mm_set1_epi32(0));
}

template <>
inline int16x8_m128i MaskIfZero(int16x8_m128i a) {
  return MaskIfEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
}

template <>
inline __m128i MaskIfNonZero(__m128i a) {
  return MaskIfNotEqual(a, _mm_set1_epi32(0));
}

template <>
inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) {
  return MaskIfNotEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
}

template <>
inline __m128i MaskIfGreaterThan(__m128i a, __m128i b) {
  return _mm_cmpgt_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfLessThan(__m128i a, __m128i b) {
  return _mm_cmplt_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfGreaterThanOrEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfLessThan(a, b));
}

template <>
inline int16x8_m128i MaskIfGreaterThanOrEqual(int16x8_m128i a,
                                              int16x8_m128i b) {
  return BitNot(MaskIfLessThan(a, b));
}

template <>
inline __m128i MaskIfLessThanOrEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfGreaterThan(a, b));
}

template <>
inline int16x8_m128i MaskIfLessThanOrEqual(int16x8_m128i a, int16x8_m128i b) {
  return BitNot(MaskIfGreaterThan(a, b));
}

/* Assumptions:
   - All and Any are used on masks.
   - Masks are all_ones for true lanes, all_zeroes otherwise.
   Hence, All means all 128 bits set, and Any means any bit set.
*/
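//
// For example, under that convention (using the mask helpers defined above,
// with __m128i values x and lo holding int32x4 data):
//   bool all_ge = All(MaskIfGreaterThanOrEqual(x, lo));  // every lane x >= lo
//   bool any_zero = Any(MaskIfZero(x));                  // some lane of x is 0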

template <>
inline bool All(__m128i a) {
  // True iff every bit of a is set: _mm_test_all_ones checks that
  // (~a) & all_ones is zero.
  return _mm_test_all_ones(a);
}

template <>
inline bool All(int16x8_m128i a) {
  // True iff every bit of a is set.
  return _mm_test_all_ones(a.v);
}

template <>
inline bool Any(__m128i a) {
  return !_mm_testz_si128(a, a);
}

template <>
inline bool Any(int16x8_m128i a) {
  return !_mm_testz_si128(a.v, a.v);
}

template <>
inline __m128i RoundingHalfSum(__m128i a, __m128i b) {
  /* An alternative would divide the inputs before the add, avoiding overflow
     altogether and the costly test of checking whether an overflow occurred
     on the signed add:
       __m128i round_bit_mask, a_over_2, b_over_2, round_bit, sum;
       round_bit_mask = _mm_set1_epi32(1);
       a_over_2 = _mm_srai_epi32(a, 1);
       b_over_2 = _mm_srai_epi32(b, 1);
       sum = Add(a_over_2, b_over_2);
       round_bit = _mm_sign_epi32(BitAnd(BitOr(a, b), round_bit_mask), sum);
       return Add(sum, round_bit);
  */

  /* The implementation below instead detects overflow and xors the sign bit
     if an overflow happened. */
  __m128i one, sign_bit_mask, sum, rounded_half_sum, overflow, result;
  one = _mm_set1_epi32(1);
  sign_bit_mask = _mm_set1_epi32(0x80000000);
  sum = Add(a, b);
  rounded_half_sum = _mm_srai_epi32(Add(sum, one), 1);
  overflow =
      BitAnd(BitAnd(BitXor(a, rounded_half_sum), BitXor(b, rounded_half_sum)),
             sign_bit_mask);
  result = BitXor(rounded_half_sum, overflow);
  return result;
}
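
// Scalar sketch of the overflow trick used above (illustrative only, not
// called by the library; assumes <cstdint> and <limits>). The wrapped
// rounded half-sum has the opposite sign from both inputs exactly when a
// wraparound corrupted it, and xoring the sign bit back restores the true
// value:
//
//   std::int32_t RoundingHalfSumSketch(std::int32_t a, std::int32_t b) {
//     // Wrapping add in unsigned arithmetic, mirroring _mm_add_epi32.
//     std::uint32_t sum = static_cast<std::uint32_t>(a) +
//                         static_cast<std::uint32_t>(b);
//     std::int32_t rhs = static_cast<std::int32_t>(sum + 1) >> 1;
//     std::int32_t sign = std::numeric_limits<std::int32_t>::min();
//     std::int32_t overflow = (a ^ rhs) & (b ^ rhs) & sign;
//     return rhs ^ overflow;  // flip the sign bit back if it was corrupted
//   }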

template <>
inline int16x8_m128i RoundingHalfSum(int16x8_m128i a, int16x8_m128i b) {
  // Idea: go to unsigned to use _mm_avg_epu16,
  // borrowed from Intel's arm_neon_sse.h header.
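  // Subtracting -32768 shifts both operands into unsigned range by the same
  // constant, so the unsigned rounding average differs from the desired
  // signed one by exactly that constant, which the final add removes:
  //   avg_u(a + 32768, b + 32768) = avg_s(a, b) + 32768.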
  __m128i constant_neg_32768 = _mm_set1_epi16(-32768);
  __m128i a_unsigned = _mm_sub_epi16(a.v, constant_neg_32768);
  __m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768);
  __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
  __m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768);
  return to_int16x8_m128i(avg);
}

template <>
inline __m128i SaturatingRoundingDoublingHighMul(__m128i a, __m128i b) {
  __m128i min, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3;
  __m128i a0b0_a2b2, a1b1_a3b3, a0b0_a2b2_rounded, a1b1_a3b3_rounded;
  __m128i a0b0_a2b2_rounded_2x, a1b1_a3b3_rounded_2x, result;
  __m128i nudge;

  // saturation only happens if a == b == INT_MIN
  min = _mm_set1_epi32(std::numeric_limits<std::int32_t>::min());
  saturation_mask = BitAnd(MaskIfEqual(a, b), MaskIfEqual(a, min));

  // a = a0 | a1 | a2 | a3
  // b = b0 | b1 | b2 | b3
  a0_a2 = a;
  a1_a3 = _mm_srli_si128(a, 4);
  b0_b2 = b;
  b1_b3 = _mm_srli_si128(b, 4);

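  // _mm_mul_epi32 multiplies only the even 32-bit lanes (0 and 2) of its
  // operands, producing two signed 64-bit products per call; shifting a and b
  // right by 4 bytes above lines up the odd lanes for the second call.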
  a0b0_a2b2 = _mm_mul_epi32(a0_a2, b0_b2);
  a1b1_a3b3 = _mm_mul_epi32(a1_a3, b1_b3);

  // do the rounding and take into account that it will be doubled
  nudge = _mm_set1_epi64x(1 << 30);
  a0b0_a2b2_rounded = _mm_add_epi64(a0b0_a2b2, nudge);
  a1b1_a3b3_rounded = _mm_add_epi64(a1b1_a3b3, nudge);

  // do the doubling
  a0b0_a2b2_rounded_2x = _mm_slli_epi64(a0b0_a2b2_rounded, 1);
  a1b1_a3b3_rounded_2x = _mm_slli_epi64(a1b1_a3b3_rounded, 1);

  // get the high part of the products
  result = _mm_blend_epi16(_mm_srli_si128(a0b0_a2b2_rounded_2x, 4),
                           a1b1_a3b3_rounded_2x, 0xcc);
  // saturate the lanes that overflowed: the only overflowing case,
  // INT_MIN * INT_MIN, mathematically yields +2^31, so it must clamp to the
  // maximum representable value (matching the scalar path in fixedpoint.h).
  __m128i max = _mm_set1_epi32(std::numeric_limits<std::int32_t>::max());
  return SelectUsingMask(saturation_mask, max, result);
}
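
// Scalar sketch of the lane-wise arithmetic above (illustrative only, not
// called by the library; assumes <cstdint> and <limits>). Each lane computes
// the high 32 bits of 2 * a * b with rounding, i.e. (a * b + 2^30) >> 31 in
// 64-bit arithmetic, saturated for the single overflowing input pair:
//
//   std::int32_t SaturatingRoundingDoublingHighMulSketch(std::int32_t a,
//                                                        std::int32_t b) {
//     std::int32_t int32_min = std::numeric_limits<std::int32_t>::min();
//     if (a == int32_min && b == int32_min) {
//       return std::numeric_limits<std::int32_t>::max();  // saturate
//     }
//     std::int64_t ab = static_cast<std::int64_t>(a) * b;
//     return static_cast<std::int32_t>((ab + (std::int64_t{1} << 30)) >> 31);
//   }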

template <>
inline int16x8_m128i SaturatingRoundingDoublingHighMul(int16x8_m128i a,
                                                       int16x8_m128i b) {
  // Idea: use _mm_mulhrs_epi16 then saturate with a bit-operation,
  // borrowed from Intel's arm_neon_sse.h header.
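  // _mm_mulhrs_epi16 already computes the rounded doubling high half; its
  // only out-of-range case is -32768 * -32768, for which it returns -32768
  // (0x8000). In exactly that lane the mask below is all ones, and xoring
  // 0x8000 with 0xffff yields 0x7fff, the saturated value.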
  __m128i result_unsaturated = _mm_mulhrs_epi16(a.v, b.v);
  __m128i saturation_mask =
      _mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000));
  __m128i result = _mm_xor_si128(result_unsaturated, saturation_mask);
  return to_int16x8_m128i(result);
}

template <>
inline __m128i Dup<__m128i>(std::int32_t x) {
  return _mm_set1_epi32(x);
}

template <>
inline int16x8_m128i Dup<int16x8_m128i>(std::int16_t x) {
  return to_int16x8_m128i(_mm_set1_epi16(x));
}

// So far this is only needed for int16.
template <>
inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) {
  return to_int16x8_m128i(_mm_adds_epi16(a.v, b.v));
}

}  // end namespace gemmlowp

#endif  // GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_