/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/x86/bitdepth_conversion_avx2.h"
#include "aom_ports/mem.h"
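
// One pass of an 8-point Hadamard transform over two 8x8 blocks at once;
// each __m256i in 'in' carries one 8-value row from each block (low and high
// 128-bit lane). iter == 0 is the column pass, which also transposes the
// blocks for the second call; iter == 1 is the row pass, which writes the
// results back without a transpose.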
static void hadamard_col8x2_avx2(__m256i *in, int iter) {
  __m256i a0 = in[0];
  __m256i a1 = in[1];
  __m256i a2 = in[2];
  __m256i a3 = in[3];
  __m256i a4 = in[4];
  __m256i a5 = in[5];
  __m256i a6 = in[6];
  __m256i a7 = in[7];

  // Stage 1 butterflies.
  __m256i b0 = _mm256_add_epi16(a0, a1);
  __m256i b1 = _mm256_sub_epi16(a0, a1);
  __m256i b2 = _mm256_add_epi16(a2, a3);
  __m256i b3 = _mm256_sub_epi16(a2, a3);
  __m256i b4 = _mm256_add_epi16(a4, a5);
  __m256i b5 = _mm256_sub_epi16(a4, a5);
  __m256i b6 = _mm256_add_epi16(a6, a7);
  __m256i b7 = _mm256_sub_epi16(a6, a7);

  // Stage 2 butterflies.
  a0 = _mm256_add_epi16(b0, b2);
  a1 = _mm256_add_epi16(b1, b3);
  a2 = _mm256_sub_epi16(b0, b2);
  a3 = _mm256_sub_epi16(b1, b3);
  a4 = _mm256_add_epi16(b4, b6);
  a5 = _mm256_add_epi16(b5, b7);
  a6 = _mm256_sub_epi16(b4, b6);
  a7 = _mm256_sub_epi16(b5, b7);

  if (iter == 0) {
    // Stage 3 butterflies.
    b0 = _mm256_add_epi16(a0, a4);
    b7 = _mm256_add_epi16(a1, a5);
    b3 = _mm256_add_epi16(a2, a6);
    b4 = _mm256_add_epi16(a3, a7);
    b2 = _mm256_sub_epi16(a0, a4);
    b6 = _mm256_sub_epi16(a1, a5);
    b1 = _mm256_sub_epi16(a2, a6);
    b5 = _mm256_sub_epi16(a3, a7);

    // Transpose each 8x8 block within its 128-bit lane (16-bit, 32-bit,
    // then 64-bit interleaves) so the second pass operates on columns.
    a0 = _mm256_unpacklo_epi16(b0, b1);
    a1 = _mm256_unpacklo_epi16(b2, b3);
    a2 = _mm256_unpackhi_epi16(b0, b1);
    a3 = _mm256_unpackhi_epi16(b2, b3);
    a4 = _mm256_unpacklo_epi16(b4, b5);
    a5 = _mm256_unpacklo_epi16(b6, b7);
    a6 = _mm256_unpackhi_epi16(b4, b5);
    a7 = _mm256_unpackhi_epi16(b6, b7);

    b0 = _mm256_unpacklo_epi32(a0, a1);
    b1 = _mm256_unpacklo_epi32(a4, a5);
    b2 = _mm256_unpackhi_epi32(a0, a1);
    b3 = _mm256_unpackhi_epi32(a4, a5);
    b4 = _mm256_unpacklo_epi32(a2, a3);
    b5 = _mm256_unpacklo_epi32(a6, a7);
    b6 = _mm256_unpackhi_epi32(a2, a3);
    b7 = _mm256_unpackhi_epi32(a6, a7);

    in[0] = _mm256_unpacklo_epi64(b0, b1);
    in[1] = _mm256_unpackhi_epi64(b0, b1);
    in[2] = _mm256_unpacklo_epi64(b2, b3);
    in[3] = _mm256_unpackhi_epi64(b2, b3);
    in[4] = _mm256_unpacklo_epi64(b4, b5);
    in[5] = _mm256_unpackhi_epi64(b4, b5);
    in[6] = _mm256_unpacklo_epi64(b6, b7);
    in[7] = _mm256_unpackhi_epi64(b6, b7);
  } else {
    // Final pass: stage 3 butterflies written straight back, no transpose.
    in[0] = _mm256_add_epi16(a0, a4);
    in[7] = _mm256_add_epi16(a1, a5);
    in[3] = _mm256_add_epi16(a2, a6);
    in[4] = _mm256_add_epi16(a3, a7);
    in[2] = _mm256_sub_epi16(a0, a4);
    in[6] = _mm256_sub_epi16(a1, a5);
    in[1] = _mm256_sub_epi16(a2, a6);
    in[5] = _mm256_sub_epi16(a3, a7);
  }
}
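
// Hadamard transform of two horizontally adjacent 8x8 blocks. Each 32-byte
// row load picks up one row of both blocks, so the two passes run on both
// blocks in parallel; the permutes on store separate the blocks again
// (64 coefficients for the left block, then 64 for the right).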
static void hadamard_8x8x2_avx2(const int16_t *src_diff, ptrdiff_t src_stride,
                                int16_t *coeff) {
  __m256i src[8];
  src[0] = _mm256_loadu_si256((const __m256i *)src_diff);
  src[1] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[2] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[3] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[4] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[5] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[6] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[7] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));

  hadamard_col8x2_avx2(src, 0);
  hadamard_col8x2_avx2(src, 1);

  // 0x20 concatenates the low 128-bit lanes of the two inputs (first 8x8
  // block); 0x31 concatenates the high lanes (second 8x8 block).
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[0], src[1], 0x20));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[2], src[3], 0x20));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[4], src[5], 0x20));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[6], src[7], 0x20));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[0], src[1], 0x31));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[2], src[3], 0x31));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[4], src[5], 0x31));
  coeff += 16;
  _mm256_storeu_si256((__m256i *)coeff,
                      _mm256_permute2x128_si256(src[6], src[7], 0x31));
}
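
// 16x16 Hadamard: four 8x8 sub-transforms (computed two at a time above) are
// combined with one more level of butterflies, halving the sums (>> 1) to
// limit the dynamic range. With is_final == 0 the output is left as int16_t
// so the 32x32 transform below can keep working in 16-bit precision; with
// is_final == 1 it is widened to tran_low_t via store_tran_low().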
static INLINE void hadamard_16x16_avx2(const int16_t *src_diff,
                                       ptrdiff_t src_stride, tran_low_t *coeff,
                                       int is_final) {
  DECLARE_ALIGNED(32, int16_t, temp_coeff[16 * 16]);
  int16_t *t_coeff = temp_coeff;
  int16_t *coeff16 = (int16_t *)coeff;
  int idx;
  for (idx = 0; idx < 2; ++idx) {
    const int16_t *src_ptr = src_diff + idx * 8 * src_stride;
    hadamard_8x8x2_avx2(src_ptr, src_stride, t_coeff + (idx * 64 * 2));
  }

  for (idx = 0; idx < 64; idx += 16) {
    const __m256i coeff0 = _mm256_loadu_si256((const __m256i *)t_coeff);
    const __m256i coeff1 = _mm256_loadu_si256((const __m256i *)(t_coeff + 64));
    const __m256i coeff2 = _mm256_loadu_si256((const __m256i *)(t_coeff + 128));
    const __m256i coeff3 = _mm256_loadu_si256((const __m256i *)(t_coeff + 192));

    __m256i b0 = _mm256_add_epi16(coeff0, coeff1);
    __m256i b1 = _mm256_sub_epi16(coeff0, coeff1);
    __m256i b2 = _mm256_add_epi16(coeff2, coeff3);
    __m256i b3 = _mm256_sub_epi16(coeff2, coeff3);

    // Halve to keep the combined values within 16 bits.
    b0 = _mm256_srai_epi16(b0, 1);
    b1 = _mm256_srai_epi16(b1, 1);
    b2 = _mm256_srai_epi16(b2, 1);
    b3 = _mm256_srai_epi16(b3, 1);
    if (is_final) {
      store_tran_low(_mm256_add_epi16(b0, b2), coeff);
      store_tran_low(_mm256_add_epi16(b1, b3), coeff + 64);
      store_tran_low(_mm256_sub_epi16(b0, b2), coeff + 128);
      store_tran_low(_mm256_sub_epi16(b1, b3), coeff + 192);
      coeff += 16;
    } else {
      _mm256_storeu_si256((__m256i *)coeff16, _mm256_add_epi16(b0, b2));
      _mm256_storeu_si256((__m256i *)(coeff16 + 64), _mm256_add_epi16(b1, b3));
      _mm256_storeu_si256((__m256i *)(coeff16 + 128), _mm256_sub_epi16(b0, b2));
      _mm256_storeu_si256((__m256i *)(coeff16 + 192), _mm256_sub_epi16(b1, b3));
      coeff16 += 16;
    }
    t_coeff += 16;
  }
}
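
// Public entry point: full 16x16 Hadamard with tran_low_t output.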
void aom_hadamard_16x16_avx2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  hadamard_16x16_avx2(src_diff, src_stride, coeff, 1);
}
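
// 32x32 Hadamard built from four 16x16 transforms plus a final level of
// butterflies, with an extra >> 2 to control the dynamic range.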
void aom_hadamard_32x32_avx2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in the
  // next stage. Output to an intermediate buffer first, then store_tran_low()
  // in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[32 * 32]);
  int16_t *t_coeff = temp_coeff;
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    hadamard_16x16_avx2(src_ptr, src_stride,
                        (tran_low_t *)(t_coeff + idx * 256), 0);
  }

  for (idx = 0; idx < 256; idx += 16) {
    const __m256i coeff0 = _mm256_loadu_si256((const __m256i *)t_coeff);
    const __m256i coeff1 = _mm256_loadu_si256((const __m256i *)(t_coeff + 256));
    const __m256i coeff2 = _mm256_loadu_si256((const __m256i *)(t_coeff + 512));
    const __m256i coeff3 = _mm256_loadu_si256((const __m256i *)(t_coeff + 768));

    __m256i b0 = _mm256_add_epi16(coeff0, coeff1);
    __m256i b1 = _mm256_sub_epi16(coeff0, coeff1);
    __m256i b2 = _mm256_add_epi16(coeff2, coeff3);
    __m256i b3 = _mm256_sub_epi16(coeff2, coeff3);

    // Scale down by 4 before the final butterflies to avoid 16-bit overflow.
    b0 = _mm256_srai_epi16(b0, 2);
    b1 = _mm256_srai_epi16(b1, 2);
    b2 = _mm256_srai_epi16(b2, 2);
    b3 = _mm256_srai_epi16(b3, 2);

    store_tran_low(_mm256_add_epi16(b0, b2), coeff);
    store_tran_low(_mm256_add_epi16(b1, b3), coeff + 256);
    store_tran_low(_mm256_sub_epi16(b0, b2), coeff + 512);
    store_tran_low(_mm256_sub_epi16(b1, b3), coeff + 768);

    coeff += 16;
    t_coeff += 16;
  }
}
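
// Sum of absolute transformed differences: accumulates |coeff[i]| over
// 'length' values. 'length' is assumed to be a multiple of 16.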
int aom_satd_avx2(const tran_low_t *coeff, int length) {
  const __m256i one = _mm256_set1_epi16(1);
  __m256i accum = _mm256_setzero_si256();
  int i;

  for (i = 0; i < length; i += 16) {
    const __m256i src_line = load_tran_low(coeff);
    const __m256i abs = _mm256_abs_epi16(src_line);
    // madd against 1 pairs two 16-bit |coeff| into one 32-bit partial sum.
    const __m256i sum = _mm256_madd_epi16(abs, one);
    accum = _mm256_add_epi32(accum, sum);
    coeff += 16;
  }

  { // 32 bit horizontal add
    const __m256i a = _mm256_srli_si256(accum, 8);
    const __m256i b = _mm256_add_epi32(accum, a);
    const __m256i c = _mm256_srli_epi64(b, 32);
    const __m256i d = _mm256_add_epi32(b, c);
    const __m128i accum_128 = _mm_add_epi32(_mm256_castsi256_si128(d),
                                            _mm256_extractf128_si256(d, 1));
    return _mm_cvtsi128_si32(accum_128);
  }
}
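
// A minimal usage sketch (illustrative, not part of the library): SATD of a
// hypothetical 16x16 residual block 'diff' laid out with a stride of 16.
//
//   DECLARE_ALIGNED(32, tran_low_t, coeff[16 * 16]);
//   aom_hadamard_16x16_avx2(diff, /*src_stride=*/16, coeff);
//   const int satd = aom_satd_avx2(coeff, 16 * 16);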