1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #ifndef AOM_AOM_DSP_X86_SYNONYMS_H_
13 #define AOM_AOM_DSP_X86_SYNONYMS_H_
14
15 #include <immintrin.h>
16 #include <string.h>
17
18 #include "config/aom_config.h"
19
20 #include "aom/aom_integer.h"
21
22 /**
23 * Various reusable shorthands for x86 SIMD intrinsics.
24 *
25 * Intrinsics prefixed with xx_ operate on or return 128bit XMM registers.
26 * Intrinsics prefixed with yy_ operate on or return 256bit YMM registers.
27 */
28
29 // Loads and stores to do away with the tedium of casting the address
30 // to the right type.
xx_loadl_32(const void * a)31 static INLINE __m128i xx_loadl_32(const void *a) {
32 int val;
33 memcpy(&val, a, sizeof(val));
34 return _mm_cvtsi32_si128(val);
35 }
36
xx_loadl_64(const void * a)37 static INLINE __m128i xx_loadl_64(const void *a) {
38 return _mm_loadl_epi64((const __m128i *)a);
39 }
40
xx_load_128(const void * a)41 static INLINE __m128i xx_load_128(const void *a) {
42 return _mm_load_si128((const __m128i *)a);
43 }
44
xx_loadu_128(const void * a)45 static INLINE __m128i xx_loadu_128(const void *a) {
46 return _mm_loadu_si128((const __m128i *)a);
47 }
48
49 // Load 64 bits from each of hi and low, and pack into an SSE register
50 // Since directly loading as `int64_t`s and using _mm_set_epi64 may violate
51 // the strict aliasing rule, this takes a different approach
xx_loadu_2x64(const void * hi,const void * lo)52 static INLINE __m128i xx_loadu_2x64(const void *hi, const void *lo) {
53 return _mm_unpacklo_epi64(_mm_loadu_si64(lo), _mm_loadu_si64(hi));
54 }
55
xx_storel_32(void * const a,const __m128i v)56 static INLINE void xx_storel_32(void *const a, const __m128i v) {
57 const int val = _mm_cvtsi128_si32(v);
58 memcpy(a, &val, sizeof(val));
59 }
60
xx_storel_64(void * const a,const __m128i v)61 static INLINE void xx_storel_64(void *const a, const __m128i v) {
62 _mm_storel_epi64((__m128i *)a, v);
63 }
64
xx_store_128(void * const a,const __m128i v)65 static INLINE void xx_store_128(void *const a, const __m128i v) {
66 _mm_store_si128((__m128i *)a, v);
67 }
68
xx_storeu_128(void * const a,const __m128i v)69 static INLINE void xx_storeu_128(void *const a, const __m128i v) {
70 _mm_storeu_si128((__m128i *)a, v);
71 }
72
// The _mm_set_epi64x() intrinsic is undefined for some Visual Studio
// compilers (pre-VS2015, i.e. _MSC_VER < 1900). The following function is
// equivalent to _mm_set_epi64x() acting on 32-bit integers: the low 32 bits
// of each 64-bit lane hold e0/e1 and the high 32 bits are zero.
static INLINE __m128i xx_set_64_from_32i(int32_t e1, int32_t e0) {
#if defined(_MSC_VER) && _MSC_VER < 1900
  return _mm_set_epi32(0, e1, 0, e0);
#else
  // The uint32_t casts make the implicit widening to 64 bits a
  // zero-extension, matching the explicit zero high words above.
  return _mm_set_epi64x((uint32_t)e1, (uint32_t)e0);
#endif
}
83
// The _mm_set1_epi64x() intrinsic is undefined for some Visual Studio
// compilers (pre-VS2015, i.e. _MSC_VER < 1900). The following function is
// equivalent to _mm_set1_epi64x() acting on a 32-bit integer: both 64-bit
// lanes hold `a` in their low 32 bits with the high 32 bits zero.
static INLINE __m128i xx_set1_64_from_32i(int32_t a) {
#if defined(_MSC_VER) && _MSC_VER < 1900
  return _mm_set_epi32(0, a, 0, a);
#else
  // The uint32_t cast makes the implicit widening to 64 bits a
  // zero-extension, matching the explicit zero high words above.
  return _mm_set1_epi64x((uint32_t)a);
#endif
}
94
95 // Fill an SSE register using an interleaved pair of values, ie. set the
96 // 8 channels to {a, b, a, b, a, b, a, b}, using the same channel ordering
97 // as when a register is stored to / loaded from memory.
98 //
99 // This is useful for rearranging filter kernels for use with the _mm_madd_epi16
100 // instruction
xx_set2_epi16(int16_t a,int16_t b)101 static INLINE __m128i xx_set2_epi16(int16_t a, int16_t b) {
102 return _mm_setr_epi16(a, b, a, b, a, b, a, b);
103 }
104
xx_round_epu16(__m128i v_val_w)105 static INLINE __m128i xx_round_epu16(__m128i v_val_w) {
106 return _mm_avg_epu16(v_val_w, _mm_setzero_si128());
107 }
108
xx_roundn_epu16(__m128i v_val_w,int bits)109 static INLINE __m128i xx_roundn_epu16(__m128i v_val_w, int bits) {
110 const __m128i v_s_w = _mm_srli_epi16(v_val_w, bits - 1);
111 return _mm_avg_epu16(v_s_w, _mm_setzero_si128());
112 }
113
xx_roundn_epu32(__m128i v_val_d,int bits)114 static INLINE __m128i xx_roundn_epu32(__m128i v_val_d, int bits) {
115 const __m128i v_bias_d = _mm_set1_epi32((1 << bits) >> 1);
116 const __m128i v_tmp_d = _mm_add_epi32(v_val_d, v_bias_d);
117 return _mm_srli_epi32(v_tmp_d, bits);
118 }
119
xx_roundn_epi16_unsigned(__m128i v_val_d,int bits)120 static INLINE __m128i xx_roundn_epi16_unsigned(__m128i v_val_d, int bits) {
121 const __m128i v_bias_d = _mm_set1_epi16((1 << bits) >> 1);
122 const __m128i v_tmp_d = _mm_add_epi16(v_val_d, v_bias_d);
123 return _mm_srai_epi16(v_tmp_d, bits);
124 }
125
126 // This is equivalent to ROUND_POWER_OF_TWO(v_val_d, bits)
xx_roundn_epi32_unsigned(__m128i v_val_d,int bits)127 static INLINE __m128i xx_roundn_epi32_unsigned(__m128i v_val_d, int bits) {
128 const __m128i v_bias_d = _mm_set1_epi32((1 << bits) >> 1);
129 const __m128i v_tmp_d = _mm_add_epi32(v_val_d, v_bias_d);
130 return _mm_srai_epi32(v_tmp_d, bits);
131 }
132
xx_roundn_epi16(__m128i v_val_d,int bits)133 static INLINE __m128i xx_roundn_epi16(__m128i v_val_d, int bits) {
134 const __m128i v_bias_d = _mm_set1_epi16((1 << bits) >> 1);
135 const __m128i v_sign_d = _mm_srai_epi16(v_val_d, 15);
136 const __m128i v_tmp_d =
137 _mm_add_epi16(_mm_add_epi16(v_val_d, v_bias_d), v_sign_d);
138 return _mm_srai_epi16(v_tmp_d, bits);
139 }
140
141 #endif // AOM_AOM_DSP_X86_SYNONYMS_H_
142