1 /*
2 * Copyright (c) 2022 Samsung Electronics Co., Ltd.
3 * All Rights Reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * - Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * - Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * - Neither the name of the copyright owner, nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "oapv_sad_avx.h"
33
34 #if X86_SSE
35
36 /* SAD ***********************************************************************/
sad_16b_avx_8x8(int w,int h,void * src1,void * src2,int s_src1,int s_src2)37 static int sad_16b_avx_8x8(int w, int h, void* src1, void* src2, int s_src1, int s_src2)
38 {
39 s16* s1 = (s16*)src1;
40 s16* s2 = (s16*)src2;
41 __m256i zero_vector = _mm256_setzero_si256();
42 __m256i s1_vector, s2_vector, diff_vector, diff_abs1, diff_abs2;
43 // Because we are working with 16 elements at a time, stride is multiplied by 2.
44 s16 s1_stride = 2 * s_src1;
45 s16 s2_stride = 2 * s_src2;
46 { // Row 0 and Row 1
47 // Load Row 0 and Row 1 data into registers.
48 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
49 s1 += s1_stride;
50 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
51 s2 += s2_stride;
52 // Calculate absolute difference between two rows.
53 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
54 diff_abs1 = _mm256_abs_epi16(diff_vector);
55 }
56 { // Row 2 and Row 3
57 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
58 s1 += s1_stride;
59 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
60 s2 += s2_stride;
61 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
62 diff_abs2 = _mm256_abs_epi16(diff_vector);
63 }
64 // Add absolute differences to running total.
65 __m256i sum = _mm256_add_epi16(diff_abs1, diff_abs2);
66 { // Row 4 and Row 5
67 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
68 s1 += s1_stride;
69 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
70 s2 += s2_stride;
71 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
72 diff_abs2 = _mm256_abs_epi16(diff_vector);
73 sum = _mm256_add_epi16(sum, diff_abs2);
74 }
75 { // Row 6 and Row 7
76 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
77 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
78 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
79 diff_abs2 = _mm256_abs_epi16(diff_vector);
80 sum = _mm256_add_epi16(sum, diff_abs2);
81 }
82 // Convert 16-bit integers to 32-bit integers for summation.
83 __m128i sum_low = _mm256_extracti128_si256(sum, 0);
84 __m128i sum_high = _mm256_extracti128_si256(sum, 1);
85 __m256i sum_low_32 = _mm256_cvtepi16_epi32(sum_low);
86 __m256i sum_high_32 = _mm256_cvtepi16_epi32(sum_high);
87 // Sum up all the values in the array to get final SAD value.
88 sum = _mm256_add_epi32(sum_low_32, sum_high_32);
89 __m256i sum_hadd = _mm256_hadd_epi32(sum, zero_vector); // Horizontal add with zeros
90 sum = _mm256_hadd_epi32(sum_hadd, zero_vector); // Horizontal add with zeros
91 int sum1 = _mm256_extract_epi32(sum, 0);
92 int sum2 = _mm256_extract_epi32(sum, 4);
93 int sad = sum1 + sum2;
94 return sad;
95 }
96
// AVX SAD kernel dispatch table; slot 0 is the 8x8 kernel, slot 1 has no
// kernel registered (NULL) — presumably callers fall back when an entry is
// NULL; verify against dispatch code.
const oapv_fn_sad_t oapv_tbl_fn_sad_16b_avx[2] =
{
    sad_16b_avx_8x8,
    NULL
};
102
103 /* SSD ***********************************************************************/
ssd_16b_avx_8x8(int w,int h,void * src1,void * src2,int s_src1,int s_src2)104 static s64 ssd_16b_avx_8x8(int w, int h, void* src1, void* src2, int s_src1, int s_src2)
105 {
106 s16* s1 = (s16*)src1;
107 s16* s2 = (s16*)src2;
108 __m256i s1_vector, s2_vector, diff_vector, sq_vector1, sq_vector2;
109 s64 sum_arr[4];
110 // Because we are working with 16 elements at a time, stride is multiplied by 2.
111 s16 s1_stride = 2 * s_src1;
112 s16 s2_stride = 2 * s_src2;
113 s64 ssd = 0;
114 { // Row 0 and Row 1
115 // Load Row 0 and Row 1 data into registers.
116 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
117 s1 += s1_stride;
118 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
119 s2 += s2_stride;
120 // Calculate squared difference between two rows.
121 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
122 sq_vector1 = _mm256_madd_epi16(diff_vector, diff_vector);
123 }
124 { // Row 2 and Row 3
125 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
126 s1 += s1_stride;
127 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
128 s2 += s2_stride;
129 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
130 sq_vector2 = _mm256_madd_epi16(diff_vector, diff_vector);
131 }
132 // Add squared differences to running total.
133 __m256i sum = _mm256_add_epi32(sq_vector1, sq_vector2);
134 { // Row 4 and Row 5
135 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
136 s1 += s1_stride;
137 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
138 s2 += s2_stride;
139 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
140 sq_vector2 = _mm256_madd_epi16(diff_vector, diff_vector);
141 sum = _mm256_add_epi32(sum, sq_vector2);
142 }
143 { // Row 6 and Row 7
144 s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
145 s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
146 diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
147 sq_vector2 = _mm256_madd_epi16(diff_vector, diff_vector);
148 sum = _mm256_add_epi32(sum, sq_vector2);
149 }
150 // Convert 16-bit integers to 32-bit integers for summation.
151 __m128i sum_low = _mm256_extracti128_si256(sum, 0);
152 __m128i sum_high = _mm256_extracti128_si256(sum, 1);
153 __m256i sum_low_64 = _mm256_cvtepi32_epi64(sum_low);
154 __m256i sum_high_64 = _mm256_cvtepi32_epi64(sum_high);
155 // Sum up all the values in the array to get final SSD value.
156 sum = _mm256_add_epi64(sum_low_64, sum_high_64);
157 _mm256_storeu_si256((__m256i*)sum_arr, sum); // store in array for summation.
158 ssd = sum_arr[0] + sum_arr[1] + sum_arr[2] + sum_arr[3];
159 return ssd;
160 }
161
// AVX SSD kernel dispatch table; slot 0 is the 8x8 kernel, slot 1 has no
// kernel registered (NULL) — presumably callers fall back when an entry is
// NULL; verify against dispatch code.
const oapv_fn_ssd_t oapv_tbl_fn_ssd_16b_avx[2] =
{
    ssd_16b_avx_8x8,
    NULL
};
167
168 /* DIFF ***********************************************************************/
/*
 * Element-wise difference (src1 - src2) of a fixed 8x8 block of 16-bit
 * samples, written to the diff buffer.
 *
 * w, h     : part of the shared oapv_fn_diff_t signature, unused here — the
 *            kernel is hard-wired to 8x8.
 * src1/src2: pointers to s16 sample blocks.
 * s_src1/s_src2/s_diff: per-row strides, in s16 elements.
 * diff     : output buffer receiving the 16-bit differences.
 *
 * NOTE(review): each 256-bit load/store moves 16 consecutive s16 values,
 * i.e. two 8-sample rows back to back, so all three buffers are assumed to
 * store the block rows contiguously (effective row pitch of 8) — confirm
 * against callers.
 */
static void diff_16b_avx_8x8(int w, int h, void* src1, void* src2, int s_src1, int s_src2, int s_diff, s16 *diff)
{
    s16* s1 = (s16*)src1;
    s16* s2 = (s16*)src2;
    __m256i s1_vector, s2_vector, diff_vector;
    // Two rows are moved per 256-bit op, so advance by two strides.
    // Use int (not s16): "2 * stride" would silently truncate for strides
    // >= 16384 if narrowed to a 16-bit variable.
    int s1_stride = 2 * s_src1;
    int s2_stride = 2 * s_src2;
    int diff_stride = 2 * s_diff;
    { // Row 0 and Row 1
        // Load Row 0 and Row 1 data into registers.
        s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
        s1 += s1_stride;
        s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
        s2 += s2_stride;
        // Subtract and store the two-row difference directly.
        diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
        _mm256_storeu_si256((__m256i*)diff, diff_vector);
        diff += diff_stride;
    }
    { // Row 2 and Row 3
        s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
        s1 += s1_stride;
        s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
        s2 += s2_stride;
        diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
        _mm256_storeu_si256((__m256i*)diff, diff_vector);
        diff += diff_stride;
    }
    { // Row 4 and Row 5
        s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
        s1 += s1_stride;
        s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
        s2 += s2_stride;
        diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
        _mm256_storeu_si256((__m256i*)diff, diff_vector);
        diff += diff_stride;
    }
    { // Row 6 and Row 7 (last pair: no pointer advance needed)
        s1_vector = _mm256_loadu_si256((const __m256i*)(s1));
        s2_vector = _mm256_loadu_si256((const __m256i*)(s2));
        diff_vector = _mm256_sub_epi16(s1_vector, s2_vector);
        _mm256_storeu_si256((__m256i*)diff, diff_vector);
    }
}
214
// AVX DIFF kernel dispatch table; slot 0 is the 8x8 kernel, slot 1 has no
// kernel registered (NULL) — presumably callers fall back when an entry is
// NULL; verify against dispatch code.
const oapv_fn_diff_t oapv_tbl_fn_diff_16b_avx[2] =
{
    diff_16b_avx_8x8,
    NULL
};
220 #endif