/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "config/aom_dsp_rtcd.h"

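// Returns the SAD of a 32x32 block, processing two 32-byte rows per loop
// iteration and accumulating the 64-bit partial sums produced by
// _mm256_sad_epu8.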
static unsigned int sad32x32(const uint8_t *src_ptr, int src_stride,
                             const uint8_t *ref_ptr, int ref_stride) {
  __m256i s1, s2, r1, r2;
  __m256i sum = _mm256_setzero_si256();
  __m128i sum_i128;
  int i;

  for (i = 0; i < 16; ++i) {
    r1 = _mm256_loadu_si256((__m256i const *)ref_ptr);
    r2 = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride));
    s1 = _mm256_sad_epu8(r1, _mm256_loadu_si256((__m256i const *)src_ptr));
    s2 = _mm256_sad_epu8(
        r2, _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride)));
    sum = _mm256_add_epi32(sum, _mm256_add_epi32(s1, s2));
    ref_ptr += ref_stride << 1;
    src_ptr += src_stride << 1;
  }

  // Reduce the four 64-bit partial sums to a scalar: fold the upper half of
  // each 128-bit lane, then add the two lanes together.
  sum = _mm256_add_epi32(sum, _mm256_srli_si256(sum, 8));
  sum_i128 = _mm_add_epi32(_mm256_extracti128_si256(sum, 1),
                           _mm256_castsi256_si128(sum));
  return _mm_cvtsi128_si32(sum_i128);
}

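// 64x32 SAD: sum the left and right 32x32 halves.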
static unsigned int sad64x32(const uint8_t *src_ptr, int src_stride,
                             const uint8_t *ref_ptr, int ref_stride) {
  unsigned int half_width = 32;
  uint32_t sum = sad32x32(src_ptr, src_stride, ref_ptr, ref_stride);
  src_ptr += half_width;
  ref_ptr += half_width;
  sum += sad32x32(src_ptr, src_stride, ref_ptr, ref_stride);
  return sum;
}

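// 64x64 SAD: sum the top and bottom 64x32 halves.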
static unsigned int sad64x64(const uint8_t *src_ptr, int src_stride,
                             const uint8_t *ref_ptr, int ref_stride) {
  uint32_t sum = sad64x32(src_ptr, src_stride, ref_ptr, ref_stride);
  src_ptr += src_stride << 5;
  ref_ptr += ref_stride << 5;
  sum += sad64x32(src_ptr, src_stride, ref_ptr, ref_stride);
  return sum;
}

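// 128x64 SAD: sum the left and right 64x64 halves.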
unsigned int aom_sad128x64_avx2(const uint8_t *src_ptr, int src_stride,
                                const uint8_t *ref_ptr, int ref_stride) {
  unsigned int half_width = 64;
  uint32_t sum = sad64x64(src_ptr, src_stride, ref_ptr, ref_stride);
  src_ptr += half_width;
  ref_ptr += half_width;
  sum += sad64x64(src_ptr, src_stride, ref_ptr, ref_stride);
  return sum;
}

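// 64x128 SAD: sum the top and bottom 64x64 halves.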
unsigned int aom_sad64x128_avx2(const uint8_t *src_ptr, int src_stride,
                                const uint8_t *ref_ptr, int ref_stride) {
  uint32_t sum = sad64x64(src_ptr, src_stride, ref_ptr, ref_stride);
  src_ptr += src_stride << 6;
  ref_ptr += ref_stride << 6;
  sum += sad64x64(src_ptr, src_stride, ref_ptr, ref_stride);
  return sum;
}

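// 128x128 SAD: sum the top and bottom 128x64 halves.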
unsigned int aom_sad128x128_avx2(const uint8_t *src_ptr, int src_stride,
                                 const uint8_t *ref_ptr, int ref_stride) {
  uint32_t sum = aom_sad128x64_avx2(src_ptr, src_stride, ref_ptr, ref_stride);
  src_ptr += src_stride << 6;
  ref_ptr += ref_stride << 6;
  sum += aom_sad128x64_avx2(src_ptr, src_stride, ref_ptr, ref_stride);
  return sum;
}

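// SAD for a 64-pixel-wide block of height h against the average of the
// reference and a second (compound) predictor. second_pred_stride is the row
// stride of the second predictor buffer.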
static unsigned int sad_w64_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                     const uint8_t *ref_ptr, int ref_stride,
                                     const int h, const uint8_t *second_pred,
                                     const int second_pred_stride) {
  int i, res;
  __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg;
  __m256i sum_sad = _mm256_setzero_si256();
  __m256i sum_sad_h;
  __m128i sum_sad128;
  for (i = 0; i < h; i++) {
    // Average the 64 reference bytes with the second predictor, then
    // accumulate the SAD of that average against the source row.
    ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr);
    ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32));
    ref1_reg = _mm256_avg_epu8(
        ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred));
    ref2_reg = _mm256_avg_epu8(
        ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32)));
    sad1_reg =
        _mm256_sad_epu8(ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr));
    sad2_reg = _mm256_sad_epu8(
        ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32)));
    sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg));
    ref_ptr += ref_stride;
    src_ptr += src_stride;
    second_pred += second_pred_stride;
  }
  // Horizontal reduction of the 64-bit partial sums, as in sad32x32().
  sum_sad_h = _mm256_srli_si256(sum_sad, 8);
  sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h);
  sum_sad128 = _mm256_extracti128_si256(sum_sad, 1);
  sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128);
  res = _mm_cvtsi128_si32(sum_sad128);

  return res;
}

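// 64x128 compound SAD: the second predictor is a contiguous 64-wide buffer,
// so the bottom half starts 64 * 64 bytes in.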
unsigned int aom_sad64x128_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    const uint8_t *second_pred) {
  uint32_t sum = sad_w64_avg_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64,
                                  second_pred, 64);
  src_ptr += src_stride << 6;
  ref_ptr += ref_stride << 6;
  second_pred += 64 << 6;
  sum += sad_w64_avg_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64,
                          second_pred, 64);
  return sum;
}

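// 128x64 compound SAD: split into left and right 64-wide halves; the second
// predictor's row stride is the full 128-byte width.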
unsigned int aom_sad128x64_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    const uint8_t *second_pred) {
  unsigned int half_width = 64;
  uint32_t sum = sad_w64_avg_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64,
                                  second_pred, 128);
  src_ptr += half_width;
  ref_ptr += half_width;
  second_pred += half_width;
  sum += sad_w64_avg_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64,
                          second_pred, 128);
  return sum;
}

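// 128x128 compound SAD: sum the top and bottom 128x64 halves; the second
// predictor advances by 64 rows of 128 bytes.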
unsigned int aom_sad128x128_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                     const uint8_t *ref_ptr, int ref_stride,
                                     const uint8_t *second_pred) {
  uint32_t sum = aom_sad128x64_avg_avx2(src_ptr, src_stride, ref_ptr,
                                        ref_stride, second_pred);
  src_ptr += src_stride << 6;
  ref_ptr += ref_stride << 6;
  second_pred += 128 << 6;
  sum += aom_sad128x64_avg_avx2(src_ptr, src_stride, ref_ptr, ref_stride,
                                second_pred);
  return sum;
}