/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <immintrin.h>
#include <smmintrin.h>

#include "config/aom_dsp_rtcd.h"

#include "aom_ports/mem.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"

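// Accumulates the squared differences of one 32-pixel row of 8-bit values
// into the eight 32-bit lanes of *sum: bytes are widened to 16 bits,
// subtracted, then squared and pairwise-summed with madd.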
static INLINE void sse_w32_avx2(__m256i *sum, const uint8_t *a,
                                const uint8_t *b) {
  const __m256i v_a0 = yy_loadu_256(a);
  const __m256i v_b0 = yy_loadu_256(b);
  const __m256i zero = _mm256_setzero_si256();
  const __m256i v_a00_w = _mm256_unpacklo_epi8(v_a0, zero);
  const __m256i v_a01_w = _mm256_unpackhi_epi8(v_a0, zero);
  const __m256i v_b00_w = _mm256_unpacklo_epi8(v_b0, zero);
  const __m256i v_b01_w = _mm256_unpackhi_epi8(v_b0, zero);
  const __m256i v_d00_w = _mm256_sub_epi16(v_a00_w, v_b00_w);
  const __m256i v_d01_w = _mm256_sub_epi16(v_a01_w, v_b01_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d00_w, v_d00_w));
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d01_w, v_d01_w));
}

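// Horizontally reduces the eight 32-bit partial sums in *sum_all to a single
// int64_t. The lanes are zero-extended to 64 bits first, so the partials are
// treated as unsigned.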
static INLINE int64_t summary_all_avx2(const __m256i *sum_all) {
  int64_t sum;
  __m256i zero = _mm256_setzero_si256();
  const __m256i sum0_4x64 = _mm256_unpacklo_epi32(*sum_all, zero);
  const __m256i sum1_4x64 = _mm256_unpackhi_epi32(*sum_all, zero);
  const __m256i sum_4x64 = _mm256_add_epi64(sum0_4x64, sum1_4x64);
  const __m128i sum_2x64 = _mm_add_epi64(_mm256_castsi256_si128(sum_4x64),
                                         _mm256_extracti128_si256(sum_4x64, 1));
  const __m128i sum_1x64 = _mm_add_epi64(sum_2x64, _mm_srli_si128(sum_2x64, 8));
  xx_storel_64(&sum, sum_1x64);
  return sum;
}

#if CONFIG_AV1_HIGHBITDEPTH
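// Zero-extends the eight 32-bit partials in *sum32 to 64 bits and folds them
// into the four 64-bit lanes of *sum. The high-bitdepth paths call this
// periodically so the 32-bit accumulator lanes never wrap.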
static INLINE void summary_32_avx2(const __m256i *sum32, __m256i *sum) {
  const __m256i sum0_4x64 =
      _mm256_cvtepu32_epi64(_mm256_castsi256_si128(*sum32));
  const __m256i sum1_4x64 =
      _mm256_cvtepu32_epi64(_mm256_extracti128_si256(*sum32, 1));
  const __m256i sum_4x64 = _mm256_add_epi64(sum0_4x64, sum1_4x64);
  *sum = _mm256_add_epi64(*sum, sum_4x64);
}

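// Horizontally reduces the four 64-bit lanes of sum_4x64 to a single int64_t.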
static INLINE int64_t summary_4x64_avx2(const __m256i sum_4x64) {
  int64_t sum;
  const __m128i sum_2x64 = _mm_add_epi64(_mm256_castsi256_si128(sum_4x64),
                                         _mm256_extracti128_si256(sum_4x64, 1));
  const __m128i sum_1x64 = _mm_add_epi64(sum_2x64, _mm_srli_si128(sum_2x64, 8));

  xx_storel_64(&sum, sum_1x64);
  return sum;
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

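// Accumulates the squared differences of a 4x4 block of 8-bit values: the
// four 4-byte rows are packed into one 128-bit vector, widened to 16 bits,
// then madd'ed into *sum.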
static INLINE void sse_w4x4_avx2(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride, __m256i *sum) {
  const __m128i v_a0 = xx_loadl_32(a);
  const __m128i v_a1 = xx_loadl_32(a + a_stride);
  const __m128i v_a2 = xx_loadl_32(a + a_stride * 2);
  const __m128i v_a3 = xx_loadl_32(a + a_stride * 3);
  const __m128i v_b0 = xx_loadl_32(b);
  const __m128i v_b1 = xx_loadl_32(b + b_stride);
  const __m128i v_b2 = xx_loadl_32(b + b_stride * 2);
  const __m128i v_b3 = xx_loadl_32(b + b_stride * 3);
  const __m128i v_a0123 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(v_a0, v_a1),
                                             _mm_unpacklo_epi32(v_a2, v_a3));
  const __m128i v_b0123 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(v_b0, v_b1),
                                             _mm_unpacklo_epi32(v_b2, v_b3));
  const __m256i v_a_w = _mm256_cvtepu8_epi16(v_a0123);
  const __m256i v_b_w = _mm256_cvtepu8_epi16(v_b0123);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
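
// Accumulates the squared differences of two 8-pixel rows of 8-bit values.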
static INLINE void sse_w8x2_avx2(const uint8_t *a, int a_stride,
                                 const uint8_t *b, int b_stride, __m256i *sum) {
  const __m128i v_a0 = xx_loadl_64(a);
  const __m128i v_a1 = xx_loadl_64(a + a_stride);
  const __m128i v_b0 = xx_loadl_64(b);
  const __m128i v_b1 = xx_loadl_64(b + b_stride);
  const __m256i v_a_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(v_a0, v_a1));
  const __m256i v_b_w = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64(v_b0, v_b1));
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
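
// Sum of squared differences of two 8-bit buffers. A single madd adds at
// most 2 * 255^2 to a 32-bit lane, so for the block sizes AV1 passes in
// (up to 128x128) the 32-bit accumulator cannot overflow and one 64-bit
// reduction at the end suffices.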
int64_t aom_sse_avx2(const uint8_t *a, int a_stride, const uint8_t *b,
                     int b_stride, int width, int height) {
  int32_t y = 0;
  int64_t sse = 0;
  __m256i sum = _mm256_setzero_si256();
  __m256i zero = _mm256_setzero_si256();
  switch (width) {
    case 4:
      do {
        sse_w4x4_avx2(a, a_stride, b, b_stride, &sum);
        a += a_stride << 2;
        b += b_stride << 2;
        y += 4;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 8:
      do {
        sse_w8x2_avx2(a, a_stride, b, b_stride, &sum);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 16:
      do {
        const __m128i v_a0 = xx_loadu_128(a);
        const __m128i v_a1 = xx_loadu_128(a + a_stride);
        const __m128i v_b0 = xx_loadu_128(b);
        const __m128i v_b1 = xx_loadu_128(b + b_stride);
        const __m256i v_a =
            _mm256_insertf128_si256(_mm256_castsi128_si256(v_a0), v_a1, 0x01);
        const __m256i v_b =
            _mm256_insertf128_si256(_mm256_castsi128_si256(v_b0), v_b1, 0x01);
        const __m256i v_al = _mm256_unpacklo_epi8(v_a, zero);
        const __m256i v_au = _mm256_unpackhi_epi8(v_a, zero);
        const __m256i v_bl = _mm256_unpacklo_epi8(v_b, zero);
        const __m256i v_bu = _mm256_unpackhi_epi8(v_b, zero);
        const __m256i v_asub = _mm256_sub_epi16(v_al, v_bl);
        const __m256i v_bsub = _mm256_sub_epi16(v_au, v_bu);
        const __m256i temp =
            _mm256_add_epi32(_mm256_madd_epi16(v_asub, v_asub),
                             _mm256_madd_epi16(v_bsub, v_bsub));
        sum = _mm256_add_epi32(sum, temp);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 32:
      do {
        sse_w32_avx2(&sum, a, b);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 64:
      do {
        sse_w32_avx2(&sum, a, b);
        sse_w32_avx2(&sum, a + 32, b + 32);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 128:
      do {
        sse_w32_avx2(&sum, a, b);
        sse_w32_avx2(&sum, a + 32, b + 32);
        sse_w32_avx2(&sum, a + 64, b + 64);
        sse_w32_avx2(&sum, a + 96, b + 96);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    default:
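      // Widths that are a multiple of 8 are covered with 8-column strips,
      // two rows at a time; other widths use pairs of 8-wide strips over
      // four rows plus a 4-wide tail for the remaining columns.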
      if ((width & 0x07) == 0) {
        do {
          int i = 0;
          do {
            sse_w8x2_avx2(a + i, a_stride, b + i, b_stride, &sum);
            i += 8;
          } while (i < width);
          a += a_stride << 1;
          b += b_stride << 1;
          y += 2;
        } while (y < height);
      } else {
        do {
          int i = 0;
          do {
            sse_w8x2_avx2(a + i, a_stride, b + i, b_stride, &sum);
            const uint8_t *a2 = a + i + (a_stride << 1);
            const uint8_t *b2 = b + i + (b_stride << 1);
            sse_w8x2_avx2(a2, a_stride, b2, b_stride, &sum);
            i += 8;
          } while (i + 4 < width);
          sse_w4x4_avx2(a + i, a_stride, b + i, b_stride, &sum);
          a += a_stride << 2;
          b += b_stride << 2;
          y += 4;
        } while (y < height);
      }
      sse = summary_all_avx2(&sum);
      break;
  }

  return sse;
}

#if CONFIG_AV1_HIGHBITDEPTH
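// Accumulates the squared differences of one 16-pixel row of 16-bit values.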
static INLINE void highbd_sse_w16_avx2(__m256i *sum, const uint16_t *a,
                                       const uint16_t *b) {
  const __m256i v_a_w = yy_loadu_256(a);
  const __m256i v_b_w = yy_loadu_256(b);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}

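// Accumulates the squared differences of a 4x4 block of 16-bit values.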
static INLINE void highbd_sse_w4x4_avx2(__m256i *sum, const uint16_t *a,
                                        int a_stride, const uint16_t *b,
                                        int b_stride) {
  const __m128i v_a0 = xx_loadl_64(a);
  const __m128i v_a1 = xx_loadl_64(a + a_stride);
  const __m128i v_a2 = xx_loadl_64(a + a_stride * 2);
  const __m128i v_a3 = xx_loadl_64(a + a_stride * 3);
  const __m128i v_b0 = xx_loadl_64(b);
  const __m128i v_b1 = xx_loadl_64(b + b_stride);
  const __m128i v_b2 = xx_loadl_64(b + b_stride * 2);
  const __m128i v_b3 = xx_loadl_64(b + b_stride * 3);
  const __m256i v_a_w = yy_set_m128i(_mm_unpacklo_epi64(v_a0, v_a1),
                                     _mm_unpacklo_epi64(v_a2, v_a3));
  const __m256i v_b_w = yy_set_m128i(_mm_unpacklo_epi64(v_b0, v_b1),
                                     _mm_unpacklo_epi64(v_b2, v_b3));
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}

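// Accumulates the squared differences of two 8-pixel rows of 16-bit values.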
static INLINE void highbd_sse_w8x2_avx2(__m256i *sum, const uint16_t *a,
                                        int a_stride, const uint16_t *b,
                                        int b_stride) {
  const __m256i v_a_w = yy_loadu2_128(a + a_stride, a);
  const __m256i v_b_w = yy_loadu2_128(b + b_stride, b);
  const __m256i v_d_w = _mm256_sub_epi16(v_a_w, v_b_w);
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(v_d_w, v_d_w));
}
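
// Sum of squared differences of two high-bitdepth buffers, passed as
// uint8_t * per the CONVERT_TO_SHORTPTR convention. With 12-bit input a
// single madd can add up to 2 * 4095^2 to a 32-bit lane, so the wider cases
// bound how many rows accumulate in sum32 (e.g. 64 rows at width 32 keeps
// the unsigned lane totals just below 2^32) before widening into the 64-bit
// accumulator via summary_32_avx2.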
int64_t aom_highbd_sse_avx2(const uint8_t *a8, int a_stride, const uint8_t *b8,
                            int b_stride, int width, int height) {
  int32_t y = 0;
  int64_t sse = 0;
  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  __m256i sum = _mm256_setzero_si256();
  switch (width) {
    case 4:
      do {
        highbd_sse_w4x4_avx2(&sum, a, a_stride, b, b_stride);
        a += a_stride << 2;
        b += b_stride << 2;
        y += 4;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 8:
      do {
        highbd_sse_w8x2_avx2(&sum, a, a_stride, b, b_stride);
        a += a_stride << 1;
        b += b_stride << 1;
        y += 2;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 16:
      do {
        highbd_sse_w16_avx2(&sum, a, b);
        a += a_stride;
        b += b_stride;
        y += 1;
      } while (y < height);
      sse = summary_all_avx2(&sum);
      break;
    case 32:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16, b + 16);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 64 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 64;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
    case 64:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16 * 1, b + 16 * 1);
          highbd_sse_w16_avx2(&sum32, a + 16 * 2, b + 16 * 2);
          highbd_sse_w16_avx2(&sum32, a + 16 * 3, b + 16 * 3);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 32 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 32;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
    case 128:
      do {
        int l = 0;
        __m256i sum32 = _mm256_setzero_si256();
        do {
          highbd_sse_w16_avx2(&sum32, a, b);
          highbd_sse_w16_avx2(&sum32, a + 16 * 1, b + 16 * 1);
          highbd_sse_w16_avx2(&sum32, a + 16 * 2, b + 16 * 2);
          highbd_sse_w16_avx2(&sum32, a + 16 * 3, b + 16 * 3);
          highbd_sse_w16_avx2(&sum32, a + 16 * 4, b + 16 * 4);
          highbd_sse_w16_avx2(&sum32, a + 16 * 5, b + 16 * 5);
          highbd_sse_w16_avx2(&sum32, a + 16 * 6, b + 16 * 6);
          highbd_sse_w16_avx2(&sum32, a + 16 * 7, b + 16 * 7);
          a += a_stride;
          b += b_stride;
          l += 1;
        } while (l < 16 && l < (height - y));
        summary_32_avx2(&sum32, &sum);
        y += 16;
      } while (y < height);
      sse = summary_4x64_avx2(sum);
      break;
    default:
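      // Widths that are not a multiple of 8 use pairs of 8-wide strips plus
      // a 4-wide tail over four rows; multiples of 8 run 8-column strips two
      // rows at a time. Both paths flush sum32 into the 64-bit accumulator
      // every few rows so its 32-bit lanes cannot wrap.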
      if (width & 0x7) {
        do {
          int i = 0;
          __m256i sum32 = _mm256_setzero_si256();
          do {
            highbd_sse_w8x2_avx2(&sum32, a + i, a_stride, b + i, b_stride);
            const uint16_t *a2 = a + i + (a_stride << 1);
            const uint16_t *b2 = b + i + (b_stride << 1);
            highbd_sse_w8x2_avx2(&sum32, a2, a_stride, b2, b_stride);
            i += 8;
          } while (i + 4 < width);
          highbd_sse_w4x4_avx2(&sum32, a + i, a_stride, b + i, b_stride);
          summary_32_avx2(&sum32, &sum);
          a += a_stride << 2;
          b += b_stride << 2;
          y += 4;
        } while (y < height);
      } else {
        do {
          int l = 0;
          __m256i sum32 = _mm256_setzero_si256();
          do {
            int i = 0;
            do {
              highbd_sse_w8x2_avx2(&sum32, a + i, a_stride, b + i, b_stride);
              i += 8;
            } while (i < width);
            a += a_stride << 1;
            b += b_stride << 1;
            l += 2;
          } while (l < 8 && l < (height - y));
          summary_32_avx2(&sum32, &sum);
          y += 8;
        } while (y < height);
      }
      sse = summary_4x64_avx2(sum);
      break;
  }
  return sse;
}
#endif  // CONFIG_AV1_HIGHBITDEPTH