/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_
#define AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_

#if !defined(__AVX2__)

#include "aom_dsp/simd/v256_intrinsics_v128.h"

#else

// The __m256i type seems to cause problems for g++'s mangling prior to
// version 5, but adding -fabi-version=0 fixes this.
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5 && \
    defined(__AVX2__) && defined(__cplusplus)
#pragma GCC optimize "-fabi-version=0"
#endif

#include <immintrin.h>

#include "aom_dsp/simd/v128_intrinsics_x86.h"

typedef __m256i v256;

SIMD_INLINE uint32_t v256_low_u32(v256 a) {
  return (uint32_t)_mm_cvtsi128_si32(_mm256_extracti128_si256(a, 0));
}

SIMD_INLINE v64 v256_low_v64(v256 a) {
  return _mm_unpacklo_epi64(_mm256_extracti128_si256(a, 0), v64_zero());
}

SIMD_INLINE uint64_t v256_low_u64(v256 a) { return v64_u64(v256_low_v64(a)); }

SIMD_INLINE v128 v256_low_v128(v256 a) { return _mm256_castsi256_si128(a); }

SIMD_INLINE v128 v256_high_v128(v256 a) {
  return _mm256_extracti128_si256(a, 1);
}

SIMD_INLINE v256 v256_from_v128(v128 a, v128 b) {
  // gcc seems to be missing _mm256_set_m128i()
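  // (Added note: the cast-then-insert below is the usual stand-in for that
  // intrinsic: b is placed in the low 128-bit lane via the cast and a is
  // inserted into the high lane.)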
  return _mm256_inserti128_si256(_mm256_castsi128_si256(b), a, 1);
}

SIMD_INLINE v256 v256_from_v64(v64 a, v64 b, v64 c, v64 d) {
  return v256_from_v128(v128_from_v64(a, b), v128_from_v64(c, d));
}

SIMD_INLINE v256 v256_from_64(uint64_t a, uint64_t b, uint64_t c, uint64_t d) {
  return v256_from_v128(v128_from_64(a, b), v128_from_64(c, d));
}

SIMD_INLINE v256 v256_load_aligned(const void *p) {
  return _mm256_load_si256((const __m256i *)p);
}

SIMD_INLINE v256 v256_load_unaligned(const void *p) {
  return _mm256_loadu_si256((const __m256i *)p);
}

SIMD_INLINE void v256_store_aligned(void *p, v256 a) {
  _mm256_store_si256((__m256i *)p, a);
}

SIMD_INLINE void v256_store_unaligned(void *p, v256 a) {
  _mm256_storeu_si256((__m256i *)p, a);
}

SIMD_INLINE v256 v256_zero() { return _mm256_setzero_si256(); }

SIMD_INLINE v256 v256_dup_8(uint8_t x) { return _mm256_set1_epi8(x); }

SIMD_INLINE v256 v256_dup_16(uint16_t x) { return _mm256_set1_epi16(x); }

SIMD_INLINE v256 v256_dup_32(uint32_t x) { return _mm256_set1_epi32(x); }

SIMD_INLINE v256 v256_dup_64(uint64_t x) { return _mm256_set1_epi64x(x); }

SIMD_INLINE v256 v256_add_8(v256 a, v256 b) { return _mm256_add_epi8(a, b); }

SIMD_INLINE v256 v256_add_16(v256 a, v256 b) { return _mm256_add_epi16(a, b); }

SIMD_INLINE v256 v256_sadd_u8(v256 a, v256 b) { return _mm256_adds_epu8(a, b); }

SIMD_INLINE v256 v256_sadd_s8(v256 a, v256 b) { return _mm256_adds_epi8(a, b); }

SIMD_INLINE v256 v256_sadd_s16(v256 a, v256 b) {
  return _mm256_adds_epi16(a, b);
}

SIMD_INLINE v256 v256_add_32(v256 a, v256 b) { return _mm256_add_epi32(a, b); }

SIMD_INLINE v256 v256_add_64(v256 a, v256 b) { return _mm256_add_epi64(a, b); }

SIMD_INLINE v256 v256_padd_u8(v256 a) {
  return _mm256_maddubs_epi16(a, _mm256_set1_epi8(1));
}

SIMD_INLINE v256 v256_padd_s16(v256 a) {
  return _mm256_madd_epi16(a, _mm256_set1_epi16(1));
}

SIMD_INLINE v256 v256_sub_8(v256 a, v256 b) { return _mm256_sub_epi8(a, b); }

SIMD_INLINE v256 v256_ssub_u8(v256 a, v256 b) { return _mm256_subs_epu8(a, b); }

SIMD_INLINE v256 v256_ssub_s8(v256 a, v256 b) { return _mm256_subs_epi8(a, b); }

SIMD_INLINE v256 v256_sub_16(v256 a, v256 b) { return _mm256_sub_epi16(a, b); }

SIMD_INLINE v256 v256_ssub_s16(v256 a, v256 b) {
  return _mm256_subs_epi16(a, b);
}

SIMD_INLINE v256 v256_ssub_u16(v256 a, v256 b) {
  return _mm256_subs_epu16(a, b);
}

SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) { return _mm256_sub_epi32(a, b); }

SIMD_INLINE v256 v256_sub_64(v256 a, v256 b) { return _mm256_sub_epi64(a, b); }

SIMD_INLINE v256 v256_abs_s16(v256 a) { return _mm256_abs_epi16(a); }

SIMD_INLINE v256 v256_abs_s8(v256 a) { return _mm256_abs_epi8(a); }

// AVX2 doesn't have direct intrinsics to zip/unzip the 8-, 16- and 32-bit
// lanes of the lower or upper halves of a 256-bit vector because the
// unpack/pack intrinsics operate on the 256-bit input as two independent
// 128-bit vectors.
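// (Added illustration: with the four 64-bit lanes of x numbered 0..3 from
// least to most significant, _mm256_permute4x64_epi64 with
// _MM_SHUFFLE(3, 1, 2, 0) yields { x[0], x[2], x[1], x[3] }, gathering the
// low 64 bits of both 128-bit halves into the low lane and the high 64 bits
// into the high lane.  The lane-local unpacklo/unpackhi below therefore end
// up interleaving the true lower/upper 128 bits of the two inputs.)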
SIMD_INLINE v256 v256_ziplo_8(v256 a, v256 b) {
  return _mm256_unpacklo_epi8(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziphi_8(v256 a, v256 b) {
  return _mm256_unpackhi_epi8(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziplo_16(v256 a, v256 b) {
  return _mm256_unpacklo_epi16(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziphi_16(v256 a, v256 b) {
  return _mm256_unpackhi_epi16(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziplo_32(v256 a, v256 b) {
  return _mm256_unpacklo_epi32(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziphi_32(v256 a, v256 b) {
  return _mm256_unpackhi_epi32(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziplo_64(v256 a, v256 b) {
  return _mm256_unpacklo_epi64(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziphi_64(v256 a, v256 b) {
  return _mm256_unpackhi_epi64(
      _mm256_permute4x64_epi64(b, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)));
}

SIMD_INLINE v256 v256_ziplo_128(v256 a, v256 b) {
  return v256_from_v128(v256_low_v128(a), v256_low_v128(b));
}

SIMD_INLINE v256 v256_ziphi_128(v256 a, v256 b) {
  return v256_from_v128(v256_high_v128(a), v256_high_v128(b));
}

SIMD_INLINE v256 v256_zip_8(v128 a, v128 b) {
  return v256_from_v128(v128_ziphi_8(a, b), v128_ziplo_8(a, b));
}

SIMD_INLINE v256 v256_zip_16(v128 a, v128 b) {
  return v256_from_v128(v128_ziphi_16(a, b), v128_ziplo_16(a, b));
}

SIMD_INLINE v256 v256_zip_32(v128 a, v128 b) {
  return v256_from_v128(v128_ziphi_32(a, b), v128_ziplo_32(a, b));
}

SIMD_INLINE v256 v256_unziphi_8(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_packs_epi16(_mm256_srai_epi16(b, 8), _mm256_srai_epi16(a, 8)),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unziplo_8(v256 a, v256 b) {
  return v256_unziphi_8(_mm256_slli_si256(a, 1), _mm256_slli_si256(b, 1));
}

SIMD_INLINE v256 v256_unziphi_16(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_packs_epi32(_mm256_srai_epi32(b, 16), _mm256_srai_epi32(a, 16)),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unziplo_16(v256 a, v256 b) {
  return v256_unziphi_16(_mm256_slli_si256(a, 2), _mm256_slli_si256(b, 2));
}

SIMD_INLINE v256 v256_unziphi_32(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(b),
                                            _mm256_castsi256_ps(a),
                                            _MM_SHUFFLE(3, 1, 3, 1))),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unziplo_32(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(b),
                                            _mm256_castsi256_ps(a),
                                            _MM_SHUFFLE(2, 0, 2, 0))),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unziphi_64(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_castpd_si256(_mm256_shuffle_pd(_mm256_castsi256_pd(b),
                                            _mm256_castsi256_pd(a), 15)),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unziplo_64(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(
      _mm256_castpd_si256(
          _mm256_shuffle_pd(_mm256_castsi256_pd(b), _mm256_castsi256_pd(a), 0)),
      _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unpack_u8_s16(v128 a) {
  return v256_from_v128(v128_unpackhi_u8_s16(a), v128_unpacklo_u8_s16(a));
}

SIMD_INLINE v256 v256_unpacklo_u8_s16(v256 a) {
  return _mm256_unpacklo_epi8(
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_setzero_si256());
}

SIMD_INLINE v256 v256_unpackhi_u8_s16(v256 a) {
  return _mm256_unpackhi_epi8(
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_setzero_si256());
}

SIMD_INLINE v256 v256_unpack_s8_s16(v128 a) {
  return v256_from_v128(v128_unpackhi_s8_s16(a), v128_unpacklo_s8_s16(a));
}

SIMD_INLINE v256 v256_unpacklo_s8_s16(v256 a) {
  return _mm256_srai_epi16(
      _mm256_unpacklo_epi8(
          a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))),
      8);
}

SIMD_INLINE v256 v256_unpackhi_s8_s16(v256 a) {
  return _mm256_srai_epi16(
      _mm256_unpackhi_epi8(
          a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))),
      8);
}

SIMD_INLINE v256 v256_pack_s32_s16(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(_mm256_packs_epi32(b, a),
                                  _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_pack_s32_u16(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(_mm256_packus_epi32(b, a),
                                  _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_pack_s16_u8(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(_mm256_packus_epi16(b, a),
                                  _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_pack_s16_s8(v256 a, v256 b) {
  return _mm256_permute4x64_epi64(_mm256_packs_epi16(b, a),
                                  _MM_SHUFFLE(3, 1, 2, 0));
}

SIMD_INLINE v256 v256_unpack_u16_s32(v128 a) {
  return v256_from_v128(v128_unpackhi_u16_s32(a), v128_unpacklo_u16_s32(a));
}

SIMD_INLINE v256 v256_unpack_s16_s32(v128 a) {
  return v256_from_v128(v128_unpackhi_s16_s32(a), v128_unpacklo_s16_s32(a));
}

SIMD_INLINE v256 v256_unpacklo_u16_s32(v256 a) {
  return _mm256_unpacklo_epi16(
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_setzero_si256());
}

SIMD_INLINE v256 v256_unpacklo_s16_s32(v256 a) {
  return _mm256_srai_epi32(
      _mm256_unpacklo_epi16(
          a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))),
      16);
}

SIMD_INLINE v256 v256_unpackhi_u16_s32(v256 a) {
  return _mm256_unpackhi_epi16(
      _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0)),
      _mm256_setzero_si256());
}

SIMD_INLINE v256 v256_unpackhi_s16_s32(v256 a) {
  return _mm256_srai_epi32(
      _mm256_unpackhi_epi16(
          a, _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0))),
      16);
}

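// (Added note: _mm256_shuffle_epi8 only looks up bytes within each 128-bit
// lane, so the full 32-byte table lookup below is emulated by shuffling two
// broadcast copies of a -- its low half and its high half -- and blending on
// whether each index in pattern is below 16.)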
SIMD_INLINE v256 v256_shuffle_8(v256 a, v256 pattern) {
  return _mm256_blendv_epi8(
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(0, 1, 0, 1)), pattern),
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(0, 0, 0, 0)), pattern),
      _mm256_cmpgt_epi8(v256_dup_8(16), pattern));
}

SIMD_INLINE v256 v256_wideshuffle_8(v256 a, v256 b, v256 pattern) {
  v256 c32 = v256_dup_8(32);
  v256 p32 = v256_sub_8(pattern, c32);
  v256 r1 = _mm256_blendv_epi8(
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 1, 0, 1)), p32),
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 0, 0, 0)), p32),
      _mm256_cmpgt_epi8(v256_dup_8(48), pattern));
  v256 r2 = _mm256_blendv_epi8(
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 3, 0, 3)), pattern),
      _mm256_shuffle_epi8(
          _mm256_permute2x128_si256(a, b, _MM_SHUFFLE(0, 2, 0, 2)), pattern),
      _mm256_cmpgt_epi8(v256_dup_8(16), pattern));
  return _mm256_blendv_epi8(r1, r2, _mm256_cmpgt_epi8(c32, pattern));
}

SIMD_INLINE v256 v256_pshuffle_8(v256 a, v256 pattern) {
  return _mm256_shuffle_epi8(a, pattern);
}

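// (Added note: the signed-by-unsigned dot product below widens a with sign
// extension and b with zero extension to 16 bits, multiplies and pairwise
// adds with _mm256_madd_epi16, then folds the eight 32-bit sums down to a
// scalar.)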
SIMD_INLINE int64_t v256_dotp_su8(v256 a, v256 b) {
  v256 t1 = _mm256_madd_epi16(v256_unpackhi_s8_s16(a), v256_unpackhi_u8_s16(b));
  v256 t2 = _mm256_madd_epi16(v256_unpacklo_s8_s16(a), v256_unpacklo_u8_s16(b));
  t1 = _mm256_add_epi32(t1, t2);
  v128 t = _mm_add_epi32(_mm256_extracti128_si256(t1, 0),
                         _mm256_extracti128_si256(t1, 1));
  t = _mm_add_epi32(t, _mm_srli_si128(t, 8));
  t = _mm_add_epi32(t, _mm_srli_si128(t, 4));
  return (int32_t)v128_low_u32(t);
}

SIMD_INLINE int64_t v256_dotp_s16(v256 a, v256 b) {
  v256 r = _mm256_madd_epi16(a, b);
#if defined(__x86_64__)
  v128 t;
  r = _mm256_add_epi64(_mm256_cvtepi32_epi64(v256_high_v128(r)),
                       _mm256_cvtepi32_epi64(v256_low_v128(r)));
  t = v256_low_v128(_mm256_add_epi64(
      r, _mm256_permute2x128_si256(r, r, _MM_SHUFFLE(2, 0, 0, 1))));
  return _mm_cvtsi128_si64(_mm_add_epi64(t, _mm_srli_si128(t, 8)));
#else
  v128 l = v256_low_v128(r);
  v128 h = v256_high_v128(r);
  return (int64_t)_mm_cvtsi128_si32(l) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 4)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 8)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 12)) +
         (int64_t)_mm_cvtsi128_si32(h) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 4)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 8)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 12));
#endif
}

SIMD_INLINE int64_t v256_dotp_s32(v256 a, v256 b) {
  v256 r = _mm256_mullo_epi32(a, b);
#if defined(__x86_64__)
  v128 t;
  r = _mm256_add_epi64(_mm256_cvtepi32_epi64(v256_high_v128(r)),
                       _mm256_cvtepi32_epi64(v256_low_v128(r)));
  t = v256_low_v128(_mm256_add_epi64(
      r, _mm256_permute2x128_si256(r, r, _MM_SHUFFLE(2, 0, 0, 1))));
  return _mm_cvtsi128_si64(_mm_add_epi64(t, _mm_srli_si128(t, 8)));
#else
  v128 l = v256_low_v128(r);
  v128 h = v256_high_v128(r);
  return (int64_t)_mm_cvtsi128_si32(l) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 4)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 8)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(l, 12)) +
         (int64_t)_mm_cvtsi128_si32(h) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 4)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 8)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(h, 12));
#endif
}

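// (Added note: the horizontal byte sum below relies on _mm256_sad_epu8
// against zero, which produces four 64-bit partial sums -- one per group of
// eight bytes -- that are then folded to a single scalar.)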
SIMD_INLINE uint64_t v256_hadd_u8(v256 a) {
  v256 t = _mm256_sad_epu8(a, _mm256_setzero_si256());
  v128 lo = v256_low_v128(t);
  v128 hi = v256_high_v128(t);
  lo = v128_add_32(lo, hi);
  return v64_low_u32(v128_low_v64(lo)) + v64_low_u32(v128_high_v64(lo));
}

typedef v256 sad256_internal;

SIMD_INLINE sad256_internal v256_sad_u8_init() {
  return _mm256_setzero_si256();
}

/* Implementation dependent return value. Result must be finalised with
   v256_sad_u8_sum().
   The result for more than 32 v256_sad_u8() calls is undefined. */
SIMD_INLINE sad256_internal v256_sad_u8(sad256_internal s, v256 a, v256 b) {
  return _mm256_add_epi64(s, _mm256_sad_epu8(a, b));
}

SIMD_INLINE uint32_t v256_sad_u8_sum(sad256_internal s) {
  v256 t = _mm256_add_epi32(s, _mm256_unpackhi_epi64(s, s));
  return v128_low_u32(_mm_add_epi32(v256_high_v128(t), v256_low_v128(t)));
}
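
/* Added usage sketch (illustrative only; src, ref, stride and rows are
   assumed caller-side variables, with rows <= 32 per the note above):

     sad256_internal acc = v256_sad_u8_init();
     for (int i = 0; i < rows; i++)
       acc = v256_sad_u8(acc, v256_load_unaligned(src + i * stride),
                         v256_load_unaligned(ref + i * stride));
     uint32_t sad = v256_sad_u8_sum(acc);
*/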

typedef v256 ssd256_internal;

SIMD_INLINE ssd256_internal v256_ssd_u8_init() {
  return _mm256_setzero_si256();
}

/* Implementation dependent return value. Result must be finalised with
 * v256_ssd_u8_sum(). */
SIMD_INLINE ssd256_internal v256_ssd_u8(ssd256_internal s, v256 a, v256 b) {
  v256 l = _mm256_sub_epi16(_mm256_unpacklo_epi8(a, _mm256_setzero_si256()),
                            _mm256_unpacklo_epi8(b, _mm256_setzero_si256()));
  v256 h = _mm256_sub_epi16(_mm256_unpackhi_epi8(a, _mm256_setzero_si256()),
                            _mm256_unpackhi_epi8(b, _mm256_setzero_si256()));
  v256 rl = _mm256_madd_epi16(l, l);
  v256 rh = _mm256_madd_epi16(h, h);
  v128 c = _mm_cvtsi32_si128(32);
  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 8));
  rl = _mm256_add_epi32(rl, _mm256_srli_si256(rl, 4));
  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 8));
  rh = _mm256_add_epi32(rh, _mm256_srli_si256(rh, 4));
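  /* Added note: unpacklo_epi64 below pairs the 32-bit totals from rl and rh
     into 64-bit elements; shifting left then right by 32 (the count in c)
     clears the leftover upper halves so the totals accumulate into s as
     zero-extended 64-bit values. */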
  return _mm256_add_epi64(
      s,
      _mm256_srl_epi64(_mm256_sll_epi64(_mm256_unpacklo_epi64(rl, rh), c), c));
}

SIMD_INLINE uint32_t v256_ssd_u8_sum(ssd256_internal s) {
  v256 t = _mm256_add_epi32(s, _mm256_unpackhi_epi64(s, s));
  return v128_low_u32(_mm_add_epi32(v256_high_v128(t), v256_low_v128(t)));
}

SIMD_INLINE v256 v256_or(v256 a, v256 b) { return _mm256_or_si256(a, b); }

SIMD_INLINE v256 v256_xor(v256 a, v256 b) { return _mm256_xor_si256(a, b); }

SIMD_INLINE v256 v256_and(v256 a, v256 b) { return _mm256_and_si256(a, b); }

SIMD_INLINE v256 v256_andn(v256 a, v256 b) { return _mm256_andnot_si256(b, a); }

SIMD_INLINE v256 v256_mul_s16(v64 a, v64 b) {
  v128 lo_bits = v128_mullo_s16(a, b);
  v128 hi_bits = v128_mulhi_s16(a, b);
  return v256_from_v128(v128_ziphi_16(hi_bits, lo_bits),
                        v128_ziplo_16(hi_bits, lo_bits));
}

SIMD_INLINE v256 v256_mullo_s16(v256 a, v256 b) {
  return _mm256_mullo_epi16(a, b);
}

SIMD_INLINE v256 v256_mulhi_s16(v256 a, v256 b) {
  return _mm256_mulhi_epi16(a, b);
}

SIMD_INLINE v256 v256_mullo_s32(v256 a, v256 b) {
  return _mm256_mullo_epi32(a, b);
}

SIMD_INLINE v256 v256_madd_s16(v256 a, v256 b) {
  return _mm256_madd_epi16(a, b);
}

SIMD_INLINE v256 v256_madd_us8(v256 a, v256 b) {
  return _mm256_maddubs_epi16(a, b);
}

SIMD_INLINE v256 v256_avg_u8(v256 a, v256 b) { return _mm256_avg_epu8(a, b); }

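// (Added note: _mm256_avg_epu8/_mm256_avg_epu16 round up, so the two
// functions below subtract (a ^ b) & 1 to turn that into the round-down
// average (a + b) >> 1.)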
SIMD_INLINE v256 v256_rdavg_u8(v256 a, v256 b) {
  return _mm256_sub_epi8(
      _mm256_avg_epu8(a, b),
      _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_8(1)));
}

SIMD_INLINE v256 v256_rdavg_u16(v256 a, v256 b) {
  return _mm256_sub_epi16(
      _mm256_avg_epu16(a, b),
      _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_16(1)));
}

SIMD_INLINE v256 v256_avg_u16(v256 a, v256 b) { return _mm256_avg_epu16(a, b); }

SIMD_INLINE v256 v256_min_u8(v256 a, v256 b) { return _mm256_min_epu8(a, b); }

SIMD_INLINE v256 v256_max_u8(v256 a, v256 b) { return _mm256_max_epu8(a, b); }

SIMD_INLINE v256 v256_min_s8(v256 a, v256 b) { return _mm256_min_epi8(a, b); }

SIMD_INLINE uint32_t v256_movemask_8(v256 a) { return _mm256_movemask_epi8(a); }

SIMD_INLINE v256 v256_blend_8(v256 a, v256 b, v256 c) {
  return _mm256_blendv_epi8(a, b, c);
}

SIMD_INLINE v256 v256_max_s8(v256 a, v256 b) { return _mm256_max_epi8(a, b); }

SIMD_INLINE v256 v256_min_s16(v256 a, v256 b) { return _mm256_min_epi16(a, b); }

SIMD_INLINE v256 v256_max_s16(v256 a, v256 b) { return _mm256_max_epi16(a, b); }

SIMD_INLINE v256 v256_min_s32(v256 a, v256 b) { return _mm256_min_epi32(a, b); }

SIMD_INLINE v256 v256_max_s32(v256 a, v256 b) { return _mm256_max_epi32(a, b); }

SIMD_INLINE v256 v256_cmpgt_s8(v256 a, v256 b) {
  return _mm256_cmpgt_epi8(a, b);
}

SIMD_INLINE v256 v256_cmplt_s8(v256 a, v256 b) {
  return _mm256_cmpgt_epi8(b, a);
}

SIMD_INLINE v256 v256_cmpeq_8(v256 a, v256 b) {
  return _mm256_cmpeq_epi8(a, b);
}

SIMD_INLINE v256 v256_cmpgt_s16(v256 a, v256 b) {
  return _mm256_cmpgt_epi16(a, b);
}

SIMD_INLINE v256 v256_cmplt_s16(v256 a, v256 b) {
  return _mm256_cmpgt_epi16(b, a);
}

SIMD_INLINE v256 v256_cmpeq_16(v256 a, v256 b) {
  return _mm256_cmpeq_epi16(a, b);
}

SIMD_INLINE v256 v256_cmpgt_s32(v256 a, v256 b) {
  return _mm256_cmpgt_epi32(a, b);
}

SIMD_INLINE v256 v256_cmplt_s32(v256 a, v256 b) {
  return _mm256_cmpgt_epi32(b, a);
}

SIMD_INLINE v256 v256_cmpeq_32(v256 a, v256 b) {
  return _mm256_cmpeq_epi32(a, b);
}

SIMD_INLINE v256 v256_shl_8(v256 a, unsigned int c) {
  return _mm256_and_si256(_mm256_set1_epi8((uint8_t)(0xff << c)),
                          _mm256_sll_epi16(a, _mm_cvtsi32_si128(c)));
}

SIMD_INLINE v256 v256_shr_u8(v256 a, unsigned int c) {
  return _mm256_and_si256(_mm256_set1_epi8(0xff >> c),
                          _mm256_srl_epi16(a, _mm_cvtsi32_si128(c)));
}

SIMD_INLINE v256 v256_shr_s8(v256 a, unsigned int c) {
  __m128i x = _mm_cvtsi32_si128(c + 8);
  return _mm256_packs_epi16(_mm256_sra_epi16(_mm256_unpacklo_epi8(a, a), x),
                            _mm256_sra_epi16(_mm256_unpackhi_epi8(a, a), x));
}

SIMD_INLINE v256 v256_shl_16(v256 a, unsigned int c) {
  return _mm256_sll_epi16(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_u16(v256 a, unsigned int c) {
  return _mm256_srl_epi16(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_s16(v256 a, unsigned int c) {
  return _mm256_sra_epi16(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shl_32(v256 a, unsigned int c) {
  return _mm256_sll_epi32(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_u32(v256 a, unsigned int c) {
  return _mm256_srl_epi32(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) {
  return _mm256_sra_epi32(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shl_64(v256 a, unsigned int c) {
  return _mm256_sll_epi64(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_u64(v256 a, unsigned int c) {
  return _mm256_srl_epi64(a, _mm_cvtsi32_si128(c));
}

SIMD_INLINE v256 v256_shr_s64(v256 a, unsigned int c) {
#if defined(__AVX512VL__)
  return _mm256_sra_epi64(a, _mm_cvtsi32_si128(c));
#else
  return v256_from_v128(v128_shr_s64(v256_high_v128(a), c),
                        v128_shr_s64(v256_low_v128(a), c));
#endif
}

/* These intrinsics require immediate values, so we must use #defines
   to enforce that. */
// _mm256_slli_si256 works on 128-bit lanes and can't be used
#define v256_shl_n_byte(a, n)                                                \
  ((n) < 16 ? v256_from_v128(                                                \
                  v128_align(v256_high_v128(a), v256_low_v128(a), 16 - (n)), \
                  v128_shl_n_byte(v256_low_v128(a), n))                      \
            : _mm256_inserti128_si256(                                       \
                  _mm256_setzero_si256(),                                    \
                  v128_shl_n_byte(v256_low_v128(a), (n)-16), 1))

// _mm256_srli_si256 works on 128-bit lanes and can't be used
#define v256_shr_n_byte(a, n)                                                \
  ((n) < 16                                                                  \
       ? _mm256_alignr_epi8(                                                 \
             _mm256_permute2x128_si256(a, a, _MM_SHUFFLE(2, 0, 0, 1)), a, n) \
       : _mm256_inserti128_si256(                                            \
             _mm256_setzero_si256(),                                         \
             v128_align(v256_high_v128(a), v256_high_v128(a), n), 0))

// _mm256_alignr_epi8 works on two 128-bit lanes and can't be used
#define v256_align(a, b, c) \
  ((c) ? v256_or(v256_shr_n_byte(b, c), v256_shl_n_byte(a, 32 - c)) : b)
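
// (Added note: _mm256_slli_si256 and _mm256_srli_si256 shift each 128-bit
// lane independently, so bytes never cross the middle of the register; the
// macros above therefore assemble the full 256-bit byte shift from 128-bit
// v128_align()/v128_shl_n_byte() pieces or a lane-crossing permute, and they
// must remain macros because the underlying intrinsics take compile-time
// immediates.)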

#define v256_shl_n_8(a, c)                                   \
  _mm256_and_si256(_mm256_set1_epi8((uint8_t)(0xff << (c))), \
                   _mm256_slli_epi16(a, c))
#define v256_shr_n_u8(a, c) \
  _mm256_and_si256(_mm256_set1_epi8(0xff >> (c)), _mm256_srli_epi16(a, c))
#define v256_shr_n_s8(a, c)                                                  \
  _mm256_packs_epi16(_mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), (c) + 8), \
                     _mm256_srai_epi16(_mm256_unpackhi_epi8(a, a), (c) + 8))
#define v256_shl_n_16(a, c) _mm256_slli_epi16(a, c)
#define v256_shr_n_u16(a, c) _mm256_srli_epi16(a, c)
#define v256_shr_n_s16(a, c) _mm256_srai_epi16(a, c)
#define v256_shl_n_32(a, c) _mm256_slli_epi32(a, c)
#define v256_shr_n_u32(a, c) _mm256_srli_epi32(a, c)
#define v256_shr_n_s32(a, c) _mm256_srai_epi32(a, c)
#define v256_shl_n_64(a, c) _mm256_slli_epi64(a, c)
#define v256_shr_n_u64(a, c) _mm256_srli_epi64(a, c)
#define v256_shr_n_s64(a, c) \
  v256_shr_s64((a), (c))  // _mm256_srai_epi64 requires AVX-512VL
#define v256_shr_n_word(a, n) v256_shr_n_byte(a, 2 * (n))
#define v256_shl_n_word(a, n) v256_shl_n_byte(a, 2 * (n))

typedef v256 sad256_internal_u16;

SIMD_INLINE sad256_internal_u16 v256_sad_u16_init() { return v256_zero(); }

/* Implementation dependent return value. Result must be finalised with
 * v256_sad_u16_sum(). */
SIMD_INLINE sad256_internal_u16 v256_sad_u16(sad256_internal_u16 s, v256 a,
                                             v256 b) {
#if defined(__SSE4_1__)
  v256 t = v256_sub_16(_mm256_max_epu16(a, b), _mm256_min_epu16(a, b));
#else
  v256 t = v256_cmplt_s16(v256_xor(a, v256_dup_16(32768)),
                          v256_xor(b, v256_dup_16(32768)));
  t = v256_sub_16(v256_or(v256_and(b, t), v256_andn(a, t)),
                  v256_or(v256_and(a, t), v256_andn(b, t)));
#endif
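  /* Added note: the fallback above computes the unsigned absolute difference
     by biasing both inputs with 0x8000 so a signed compare orders them, then
     subtracting the smaller from the larger via the and/andn select. */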
  return v256_add_32(
      s, v256_add_32(v256_unpackhi_u16_s32(t), v256_unpacklo_u16_s32(t)));
}

SIMD_INLINE uint32_t v256_sad_u16_sum(sad256_internal_u16 s) {
  v128 t = v128_add_32(v256_high_v128(s), v256_low_v128(s));
  return v128_low_u32(t) + v128_low_u32(v128_shr_n_byte(t, 4)) +
         v128_low_u32(v128_shr_n_byte(t, 8)) +
         v128_low_u32(v128_shr_n_byte(t, 12));
}

typedef v256 ssd256_internal_s16;

SIMD_INLINE ssd256_internal_s16 v256_ssd_s16_init() { return v256_zero(); }

/* Implementation dependent return value. Result must be finalised with
 * v256_ssd_s16_sum(). */
SIMD_INLINE ssd256_internal_s16 v256_ssd_s16(ssd256_internal_s16 s, v256 a,
                                             v256 b) {
  v256 d = v256_sub_16(a, b);
  d = v256_madd_s16(d, d);
  return v256_add_64(s, v256_add_64(_mm256_unpackhi_epi32(d, v256_zero()),
                                    _mm256_unpacklo_epi32(d, v256_zero())));
}

SIMD_INLINE uint64_t v256_ssd_s16_sum(ssd256_internal_s16 s) {
  v128 t = v128_add_64(v256_high_v128(s), v256_low_v128(s));
  return v64_u64(v128_low_v64(t)) + v64_u64(v128_high_v64(t));
}

#endif

#endif  // AOM_AOM_DSP_SIMD_V256_INTRINSICS_X86_H_