/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_X86_TXFM_COMMON_AVX2_H_
#define AOM_AOM_DSP_X86_TXFM_COMMON_AVX2_H_

#include <immintrin.h>
#include "aom/aom_integer.h"
#include "aom_dsp/x86/synonyms.h"

#ifdef __cplusplus
extern "C" {
#endif

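// Broadcasts the 16-bit pair (a, b) into all eight 32-bit lanes, a in the low
// half and b in the high half of each lane, suitable as a weight vector for
// _mm256_madd_epi16().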
static INLINE __m256i pair_set_w16_epi16(int16_t a, int16_t b) {
  return _mm256_set1_epi32(
      (int32_t)(((uint16_t)(a)) | (((uint32_t)(b)) << 16)));
}

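// 16-bit butterfly: interleaves *in0/*in1, multiply-accumulates against the
// weight pairs w0 and w1 with _mm256_madd_epi16(), adds the rounding offset
// _r, shifts right by cos_bit and packs the results back to 16 bits, writing
// them over *in0 and *in1.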
static INLINE void btf_16_w16_avx2(const __m256i w0, const __m256i w1,
                                   __m256i *in0, __m256i *in1, const __m256i _r,
                                   const int32_t cos_bit) {
  __m256i t0 = _mm256_unpacklo_epi16(*in0, *in1);
  __m256i t1 = _mm256_unpackhi_epi16(*in0, *in1);
  __m256i u0 = _mm256_madd_epi16(t0, w0);
  __m256i u1 = _mm256_madd_epi16(t1, w0);
  __m256i v0 = _mm256_madd_epi16(t0, w1);
  __m256i v1 = _mm256_madd_epi16(t1, w1);

  __m256i a0 = _mm256_add_epi32(u0, _r);
  __m256i a1 = _mm256_add_epi32(u1, _r);
  __m256i b0 = _mm256_add_epi32(v0, _r);
  __m256i b1 = _mm256_add_epi32(v1, _r);

  __m256i c0 = _mm256_srai_epi32(a0, cos_bit);
  __m256i c1 = _mm256_srai_epi32(a1, cos_bit);
  __m256i d0 = _mm256_srai_epi32(b0, cos_bit);
  __m256i d1 = _mm256_srai_epi32(b1, cos_bit);

  *in0 = _mm256_packs_epi32(c0, c1);
  *in1 = _mm256_packs_epi32(d0, d1);
}

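// In-place add/sub butterfly on 16-bit lanes with signed saturation:
// *in0 = in0 + in1, *in1 = in0 - in1.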
static INLINE void btf_16_adds_subs_avx2(__m256i *in0, __m256i *in1) {
  const __m256i _in0 = *in0;
  const __m256i _in1 = *in1;
  *in0 = _mm256_adds_epi16(_in0, _in1);
  *in1 = _mm256_subs_epi16(_in0, _in1);
}

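// Same add/sub butterfly on 32-bit lanes, without saturation.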
static INLINE void btf_32_add_sub_avx2(__m256i *in0, __m256i *in1) {
  const __m256i _in0 = *in0;
  const __m256i _in1 = *in1;
  *in0 = _mm256_add_epi32(_in0, _in1);
  *in1 = _mm256_sub_epi32(_in0, _in1);
}

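// Out-of-place variant of btf_16_adds_subs_avx2().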
static INLINE void btf_16_adds_subs_out_avx2(__m256i *out0, __m256i *out1,
                                             __m256i in0, __m256i in1) {
  const __m256i _in0 = in0;
  const __m256i _in1 = in1;
  *out0 = _mm256_adds_epi16(_in0, _in1);
  *out1 = _mm256_subs_epi16(_in0, _in1);
}

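// Out-of-place variant of btf_32_add_sub_avx2().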
static INLINE void btf_32_add_sub_out_avx2(__m256i *out0, __m256i *out1,
                                           __m256i in0, __m256i in1) {
  const __m256i _in0 = in0;
  const __m256i _in1 = in1;
  *out0 = _mm256_add_epi32(_in0, _in1);
  *out1 = _mm256_sub_epi32(_in0, _in1);
}

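// Loads 16 int16_t values from a 32-byte-aligned address into one register.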
static INLINE __m256i load_16bit_to_16bit_avx2(const int16_t *a) {
  return _mm256_load_si256((const __m256i *)a);
}

static INLINE void load_buffer_16bit_to_16bit_avx2(const int16_t *in,
                                                   int stride, __m256i *out,
                                                   int out_size) {
  for (int i = 0; i < out_size; ++i) {
    out[i] = load_16bit_to_16bit_avx2(in + i * stride);
  }
}

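// Same as load_buffer_16bit_to_16bit_avx2(), but writes the rows into out[]
// in reverse order.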
static INLINE void load_buffer_16bit_to_16bit_flip_avx2(const int16_t *in,
                                                        int stride,
                                                        __m256i *out,
                                                        int out_size) {
  for (int i = 0; i < out_size; ++i) {
    out[out_size - i - 1] = load_16bit_to_16bit_avx2(in + i * stride);
  }
}

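// Loads 16 int32_t values and saturates them to int16_t. The pack interleaves
// the two 128-bit lanes, so the 0xD8 permute restores source order.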
static INLINE __m256i load_32bit_to_16bit_w16_avx2(const int32_t *a) {
  const __m256i a_low = _mm256_lddqu_si256((const __m256i *)a);
  const __m256i b = _mm256_packs_epi32(a_low, *(const __m256i *)(a + 8));
  return _mm256_permute4x64_epi64(b, 0xD8);
}

static INLINE void load_buffer_32bit_to_16bit_w16_avx2(const int32_t *in,
                                                       int stride, __m256i *out,
                                                       int out_size) {
  for (int i = 0; i < out_size; ++i) {
    out[i] = load_32bit_to_16bit_w16_avx2(in + i * stride);
  }
}

static INLINE void transpose_16bit_16x16_avx2(const __m256i *const in,
                                              __m256i *const out) {
  // Unpack 16 bit elements. Goes from:
  // in[0]: 00 01 02 03  08 09 0a 0b  04 05 06 07  0c 0d 0e 0f
  // in[1]: 10 11 12 13  18 19 1a 1b  14 15 16 17  1c 1d 1e 1f
  // in[2]: 20 21 22 23  28 29 2a 2b  24 25 26 27  2c 2d 2e 2f
  // in[3]: 30 31 32 33  38 39 3a 3b  34 35 36 37  3c 3d 3e 3f
  // in[4]: 40 41 42 43  48 49 4a 4b  44 45 46 47  4c 4d 4e 4f
  // in[5]: 50 51 52 53  58 59 5a 5b  54 55 56 57  5c 5d 5e 5f
  // in[6]: 60 61 62 63  68 69 6a 6b  64 65 66 67  6c 6d 6e 6f
  // in[7]: 70 71 72 73  78 79 7a 7b  74 75 76 77  7c 7d 7e 7f
  // in[8]: 80 81 82 83  88 89 8a 8b  84 85 86 87  8c 8d 8e 8f
  // to:
  // a0:    00 10 01 11  02 12 03 13  04 14 05 15  06 16 07 17
  // a1:    20 30 21 31  22 32 23 33  24 34 25 35  26 36 27 37
  // a2:    40 50 41 51  42 52 43 53  44 54 45 55  46 56 47 57
  // a3:    60 70 61 71  62 72 63 73  64 74 65 75  66 76 67 77
  // ...
  __m256i a[16];
  for (int i = 0; i < 16; i += 2) {
    a[i / 2 + 0] = _mm256_unpacklo_epi16(in[i], in[i + 1]);
    a[i / 2 + 8] = _mm256_unpackhi_epi16(in[i], in[i + 1]);
  }
  __m256i b[16];
  for (int i = 0; i < 16; i += 2) {
    b[i / 2 + 0] = _mm256_unpacklo_epi32(a[i], a[i + 1]);
    b[i / 2 + 8] = _mm256_unpackhi_epi32(a[i], a[i + 1]);
  }
  __m256i c[16];
  for (int i = 0; i < 16; i += 2) {
    c[i / 2 + 0] = _mm256_unpacklo_epi64(b[i], b[i + 1]);
    c[i / 2 + 8] = _mm256_unpackhi_epi64(b[i], b[i + 1]);
  }
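  // Swap 128-bit halves between register pairs to form the transposed rows:
  // 0x20 combines the low lanes of both operands, 0x31 the high lanes.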
  out[0 + 0] = _mm256_permute2x128_si256(c[0], c[1], 0x20);
  out[1 + 0] = _mm256_permute2x128_si256(c[8], c[9], 0x20);
  out[2 + 0] = _mm256_permute2x128_si256(c[4], c[5], 0x20);
  out[3 + 0] = _mm256_permute2x128_si256(c[12], c[13], 0x20);

  out[0 + 8] = _mm256_permute2x128_si256(c[0], c[1], 0x31);
  out[1 + 8] = _mm256_permute2x128_si256(c[8], c[9], 0x31);
  out[2 + 8] = _mm256_permute2x128_si256(c[4], c[5], 0x31);
  out[3 + 8] = _mm256_permute2x128_si256(c[12], c[13], 0x31);

  out[4 + 0] = _mm256_permute2x128_si256(c[0 + 2], c[1 + 2], 0x20);
  out[5 + 0] = _mm256_permute2x128_si256(c[8 + 2], c[9 + 2], 0x20);
  out[6 + 0] = _mm256_permute2x128_si256(c[4 + 2], c[5 + 2], 0x20);
  out[7 + 0] = _mm256_permute2x128_si256(c[12 + 2], c[13 + 2], 0x20);

  out[4 + 8] = _mm256_permute2x128_si256(c[0 + 2], c[1 + 2], 0x31);
  out[5 + 8] = _mm256_permute2x128_si256(c[8 + 2], c[9 + 2], 0x31);
  out[6 + 8] = _mm256_permute2x128_si256(c[4 + 2], c[5 + 2], 0x31);
  out[7 + 8] = _mm256_permute2x128_si256(c[12 + 2], c[13 + 2], 0x31);
}

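// Transposes a 16-wide by 8-high block of 16-bit values. out[k] holds input
// column k in its low 128-bit lane and column k + 8 in its high lane.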
static INLINE void transpose_16bit_16x8_avx2(const __m256i *const in,
                                             __m256i *const out) {
  const __m256i a0 = _mm256_unpacklo_epi16(in[0], in[1]);
  const __m256i a1 = _mm256_unpacklo_epi16(in[2], in[3]);
  const __m256i a2 = _mm256_unpacklo_epi16(in[4], in[5]);
  const __m256i a3 = _mm256_unpacklo_epi16(in[6], in[7]);
  const __m256i a4 = _mm256_unpackhi_epi16(in[0], in[1]);
  const __m256i a5 = _mm256_unpackhi_epi16(in[2], in[3]);
  const __m256i a6 = _mm256_unpackhi_epi16(in[4], in[5]);
  const __m256i a7 = _mm256_unpackhi_epi16(in[6], in[7]);

  const __m256i b0 = _mm256_unpacklo_epi32(a0, a1);
  const __m256i b1 = _mm256_unpacklo_epi32(a2, a3);
  const __m256i b2 = _mm256_unpacklo_epi32(a4, a5);
  const __m256i b3 = _mm256_unpacklo_epi32(a6, a7);
  const __m256i b4 = _mm256_unpackhi_epi32(a0, a1);
  const __m256i b5 = _mm256_unpackhi_epi32(a2, a3);
  const __m256i b6 = _mm256_unpackhi_epi32(a4, a5);
  const __m256i b7 = _mm256_unpackhi_epi32(a6, a7);

  out[0] = _mm256_unpacklo_epi64(b0, b1);
  out[1] = _mm256_unpackhi_epi64(b0, b1);
  out[2] = _mm256_unpacklo_epi64(b4, b5);
  out[3] = _mm256_unpackhi_epi64(b4, b5);
  out[4] = _mm256_unpacklo_epi64(b2, b3);
  out[5] = _mm256_unpackhi_epi64(b2, b3);
  out[6] = _mm256_unpacklo_epi64(b6, b7);
  out[7] = _mm256_unpackhi_epi64(b6, b7);
}

static INLINE void flip_buf_avx2(__m256i *in, __m256i *out, int size) {
  for (int i = 0; i < size; ++i) {
    out[size - i - 1] = in[i];
  }
}

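// Rounding shift on 16-bit lanes: a negative bit rounds (with saturating add)
// and shifts right by -bit; a positive bit shifts left by bit.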
static INLINE void round_shift_16bit_w16_avx2(__m256i *in, int size, int bit) {
  if (bit < 0) {
    bit = -bit;
    __m256i round = _mm256_set1_epi16(1 << (bit - 1));
    for (int i = 0; i < size; ++i) {
      in[i] = _mm256_adds_epi16(in[i], round);
      in[i] = _mm256_srai_epi16(in[i], bit);
    }
  } else if (bit > 0) {
    for (int i = 0; i < size; ++i) {
      in[i] = _mm256_slli_epi16(in[i], bit);
    }
  }
}

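// Rounding right shift on 32-bit lanes: (vec + (1 << (bit - 1))) >> bit.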
static INLINE __m256i av1_round_shift_32_avx2(__m256i vec, int bit) {
  __m256i tmp, round;
  round = _mm256_set1_epi32(1 << (bit - 1));
  tmp = _mm256_add_epi32(vec, round);
  return _mm256_srai_epi32(tmp, bit);
}

static INLINE void av1_round_shift_array_32_avx2(__m256i *input,
                                                 __m256i *output,
                                                 const int size,
                                                 const int bit) {
  if (bit > 0) {
    int i;
    for (i = 0; i < size; i++) {
      output[i] = av1_round_shift_32_avx2(input[i], bit);
    }
  } else {
    int i;
    for (i = 0; i < size; i++) {
      output[i] = _mm256_slli_epi32(input[i], -bit);
    }
  }
}

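// Rounding shift for rectangular blocks: applies av1_round_shift_32_avx2() for
// positive bit (or a left shift by -bit otherwise), then multiplies by val and
// applies a final rounding shift by NewSqrt2Bits.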
static INLINE void av1_round_shift_rect_array_32_avx2(__m256i *input,
                                                      __m256i *output,
                                                      const int size,
                                                      const int bit,
                                                      const int val) {
  const __m256i sqrt2 = _mm256_set1_epi32(val);
  if (bit > 0) {
    int i;
    for (i = 0; i < size; i++) {
      const __m256i r0 = av1_round_shift_32_avx2(input[i], bit);
      const __m256i r1 = _mm256_mullo_epi32(sqrt2, r0);
      output[i] = av1_round_shift_32_avx2(r1, NewSqrt2Bits);
    }
  } else {
    int i;
    for (i = 0; i < size; i++) {
      const __m256i r0 = _mm256_slli_epi32(input[i], -bit);
      const __m256i r1 = _mm256_mullo_epi32(sqrt2, r0);
      output[i] = av1_round_shift_32_avx2(r1, NewSqrt2Bits);
    }
  }
}

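// Multiply-accumulates each pair of adjacent 16-bit lanes of a against
// (scale, 1 << (NewSqrt2Bits - 1)) and shifts the 32-bit sums right by
// NewSqrt2Bits.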
static INLINE __m256i scale_round_avx2(const __m256i a, const int scale) {
  const __m256i scale_rounding =
      pair_set_w16_epi16(scale, 1 << (NewSqrt2Bits - 1));
  const __m256i b = _mm256_madd_epi16(a, scale_rounding);
  return _mm256_srai_epi32(b, NewSqrt2Bits);
}

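// Scales the 16 int16_t values in a by NewSqrt2 (with NewSqrt2Bits rounding),
// widens them to int32_t, and stores the first eight at b and the remaining
// eight at b + 64.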
static INLINE void store_rect_16bit_to_32bit_w8_avx2(const __m256i a,
                                                     int32_t *const b) {
  const __m256i one = _mm256_set1_epi16(1);
  const __m256i a_lo = _mm256_unpacklo_epi16(a, one);
  const __m256i a_hi = _mm256_unpackhi_epi16(a, one);
  const __m256i b_lo = scale_round_avx2(a_lo, NewSqrt2);
  const __m256i b_hi = scale_round_avx2(a_hi, NewSqrt2);
  const __m256i temp = _mm256_permute2f128_si256(b_lo, b_hi, 0x31);
  _mm_store_si128((__m128i *)b, _mm256_castsi256_si128(b_lo));
  _mm_store_si128((__m128i *)(b + 4), _mm256_castsi256_si128(b_hi));
  _mm256_store_si256((__m256i *)(b + 64), temp);
}

static INLINE void store_rect_buffer_16bit_to_32bit_w8_avx2(
    const __m256i *const in, int32_t *const out, const int stride,
    const int out_size) {
  for (int i = 0; i < out_size; ++i) {
    store_rect_16bit_to_32bit_w8_avx2(in[i], out + i * stride);
  }
}

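// Packs eight pairs of 128-bit registers into eight 256-bit registers:
// out[i] holds in1[i] in its low lane and in2[i] in its high lane.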
static INLINE void pack_reg(const __m128i *in1, const __m128i *in2,
                            __m256i *out) {
  out[0] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[0]), in2[0], 0x1);
  out[1] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[1]), in2[1], 0x1);
  out[2] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[2]), in2[2], 0x1);
  out[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[3]), in2[3], 0x1);
  out[4] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[4]), in2[4], 0x1);
  out[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[5]), in2[5], 0x1);
  out[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[6]), in2[6], 0x1);
  out[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(in1[7]), in2[7], 0x1);
}

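// Inverse of pack_reg(): splits eight 256-bit registers into sixteen 128-bit
// registers, low lanes into out1[0..7] and high lanes into out1[8..15].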
static INLINE void extract_reg(const __m256i *in, __m128i *out1) {
  out1[0] = _mm256_castsi256_si128(in[0]);
  out1[1] = _mm256_castsi256_si128(in[1]);
  out1[2] = _mm256_castsi256_si128(in[2]);
  out1[3] = _mm256_castsi256_si128(in[3]);
  out1[4] = _mm256_castsi256_si128(in[4]);
  out1[5] = _mm256_castsi256_si128(in[5]);
  out1[6] = _mm256_castsi256_si128(in[6]);
  out1[7] = _mm256_castsi256_si128(in[7]);

  out1[8] = _mm256_extracti128_si256(in[0], 0x01);
  out1[9] = _mm256_extracti128_si256(in[1], 0x01);
  out1[10] = _mm256_extracti128_si256(in[2], 0x01);
  out1[11] = _mm256_extracti128_si256(in[3], 0x01);
  out1[12] = _mm256_extracti128_si256(in[4], 0x01);
  out1[13] = _mm256_extracti128_si256(in[5], 0x01);
  out1[14] = _mm256_extracti128_si256(in[6], 0x01);
  out1[15] = _mm256_extracti128_si256(in[7], 0x01);
}

#ifdef __cplusplus
}
#endif

#endif  // AOM_AOM_DSP_X86_TXFM_COMMON_AVX2_H_