/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <smmintrin.h>
#include <assert.h>

#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/x86/convolve_sse2.h"
#include "aom_dsp/x86/convolve_sse4_1.h"

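// Vertical pass of the high bit-depth, distance-weighted compound
// convolution. Each outer-loop iteration filters an 8-pixel-wide column strip
// with the vertical interpolation filter. When do_average is 0, the offset
// intermediate result is stored to conv_params->dst; otherwise it is blended
// with the values already in conv_params->dst, rounded, clipped to the bit
// depth, and written to dst0.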
void av1_highbd_dist_wtd_convolve_y_sse4_1(
    const uint16_t *src, int src_stride, uint16_t *dst0, int dst_stride0, int w,
    int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
    ConvolveParams *conv_params, int bd) {
  CONV_BUF_TYPE *dst = conv_params->dst;
  int dst_stride = conv_params->dst_stride;
  const int fo_vert = filter_params_y->taps / 2 - 1;
  const uint16_t *const src_ptr = src - fo_vert * src_stride;
  const int bits = FILTER_BITS - conv_params->round_0;

  assert(bits >= 0);
  int i, j;
  const int do_average = conv_params->do_average;
  const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;

  const int w0 = conv_params->fwd_offset;
  const int w1 = conv_params->bck_offset;
  const __m128i wt0 = _mm_set1_epi32(w0);
  const __m128i wt1 = _mm_set1_epi32(w1);
  const __m128i round_const_y =
      _mm_set1_epi32(((1 << conv_params->round_1) >> 1));
  const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);
  const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);

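  // offset_const biases every intermediate result so the values stored in the
  // CONV_BUF_TYPE buffer stay non-negative; the bias is subtracted again in
  // highbd_convolve_rounding_sse2() before the final rounding.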
  const int offset_0 =
      bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
  const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
  const __m128i offset_const = _mm_set1_epi32(offset);
  const int rounding_shift =
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
  const __m128i rounding_const = _mm_set1_epi32((1 << rounding_shift) >> 1);
  const __m128i clip_pixel_to_bd =
      _mm_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
  const __m128i zero = _mm_setzero_si128();
  __m128i s[16], coeffs_y[4];

  prepare_coeffs(filter_params_y, subpel_y_qn, coeffs_y);

  for (j = 0; j < w; j += 8) {
    const uint16_t *data = &src_ptr[j];
    /* Vertical filter */
    {
      __m128i s0 = _mm_loadu_si128((__m128i *)(data + 0 * src_stride));
      __m128i s1 = _mm_loadu_si128((__m128i *)(data + 1 * src_stride));
      __m128i s2 = _mm_loadu_si128((__m128i *)(data + 2 * src_stride));
      __m128i s3 = _mm_loadu_si128((__m128i *)(data + 3 * src_stride));
      __m128i s4 = _mm_loadu_si128((__m128i *)(data + 4 * src_stride));
      __m128i s5 = _mm_loadu_si128((__m128i *)(data + 5 * src_stride));
      __m128i s6 = _mm_loadu_si128((__m128i *)(data + 6 * src_stride));

      s[0] = _mm_unpacklo_epi16(s0, s1);
      s[1] = _mm_unpacklo_epi16(s2, s3);
      s[2] = _mm_unpacklo_epi16(s4, s5);

      s[4] = _mm_unpackhi_epi16(s0, s1);
      s[5] = _mm_unpackhi_epi16(s2, s3);
      s[6] = _mm_unpackhi_epi16(s4, s5);

      s[0 + 8] = _mm_unpacklo_epi16(s1, s2);
      s[1 + 8] = _mm_unpacklo_epi16(s3, s4);
      s[2 + 8] = _mm_unpacklo_epi16(s5, s6);

      s[4 + 8] = _mm_unpackhi_epi16(s1, s2);
      s[5 + 8] = _mm_unpackhi_epi16(s3, s4);
      s[6 + 8] = _mm_unpackhi_epi16(s5, s6);

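      // s[0..3]/s[4..7] hold the interleaved source-row pairs feeding columns
      // 0-3/4-7 of output row i; s[8..11]/s[12..15] hold the same pairs
      // shifted down by one source row and feed output row i + 1, so each
      // iteration of the loop below produces two output rows.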
      for (i = 0; i < h; i += 2) {
        data = &src_ptr[i * src_stride + j];

        __m128i s7 = _mm_loadu_si128((__m128i *)(data + 7 * src_stride));
        __m128i s8 = _mm_loadu_si128((__m128i *)(data + 8 * src_stride));

        s[3] = _mm_unpacklo_epi16(s6, s7);
        s[7] = _mm_unpackhi_epi16(s6, s7);

        s[3 + 8] = _mm_unpacklo_epi16(s7, s8);
        s[7 + 8] = _mm_unpackhi_epi16(s7, s8);

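        // Filter columns 0-3 of rows i and i + 1; the left shift by 'bits'
        // followed by the round_1 shift puts the result on the compound
        // intermediate scale.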
        const __m128i res_a0 = convolve(s, coeffs_y);
        __m128i res_a_round0 = _mm_sll_epi32(res_a0, round_shift_bits);
        res_a_round0 = _mm_sra_epi32(_mm_add_epi32(res_a_round0, round_const_y),
                                     round_shift_y);

        const __m128i res_a1 = convolve(s + 8, coeffs_y);
        __m128i res_a_round1 = _mm_sll_epi32(res_a1, round_shift_bits);
        res_a_round1 = _mm_sra_epi32(_mm_add_epi32(res_a_round1, round_const_y),
                                     round_shift_y);

        __m128i res_unsigned_lo_0 = _mm_add_epi32(res_a_round0, offset_const);
        __m128i res_unsigned_lo_1 = _mm_add_epi32(res_a_round1, offset_const);

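        // Blocks narrower than 8 pixels (w == 4): only the low four results
        // are valid, so load and store 64 bits per row.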
        if (w - j < 8) {
          if (do_average) {
            const __m128i data_0 =
                _mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j]));
            const __m128i data_1 = _mm_loadl_epi64(
                (__m128i *)(&dst[i * dst_stride + j + dst_stride]));

            const __m128i data_ref_0 = _mm_unpacklo_epi16(data_0, zero);
            const __m128i data_ref_1 = _mm_unpacklo_epi16(data_1, zero);

            const __m128i comp_avg_res_0 =
                highbd_comp_avg_sse4_1(&data_ref_0, &res_unsigned_lo_0, &wt0,
                                       &wt1, use_dist_wtd_comp_avg);
            const __m128i comp_avg_res_1 =
                highbd_comp_avg_sse4_1(&data_ref_1, &res_unsigned_lo_1, &wt0,
                                       &wt1, use_dist_wtd_comp_avg);

            const __m128i round_result_0 =
                highbd_convolve_rounding_sse2(&comp_avg_res_0, &offset_const,
                                              &rounding_const, rounding_shift);
            const __m128i round_result_1 =
                highbd_convolve_rounding_sse2(&comp_avg_res_1, &offset_const,
                                              &rounding_const, rounding_shift);

            const __m128i res_16b_0 =
                _mm_packus_epi32(round_result_0, round_result_0);
            const __m128i res_clip_0 =
                _mm_min_epi16(res_16b_0, clip_pixel_to_bd);
            const __m128i res_16b_1 =
                _mm_packus_epi32(round_result_1, round_result_1);
            const __m128i res_clip_1 =
                _mm_min_epi16(res_16b_1, clip_pixel_to_bd);

            _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]),
                             res_clip_0);
            _mm_storel_epi64(
                (__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]),
                res_clip_1);

          } else {
            __m128i res_16b_0 =
                _mm_packus_epi32(res_unsigned_lo_0, res_unsigned_lo_0);

            __m128i res_16b_1 =
                _mm_packus_epi32(res_unsigned_lo_1, res_unsigned_lo_1);

            _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_16b_0);
            _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
                             res_16b_1);
          }
        } else {
          const __m128i res_b0 = convolve(s + 4, coeffs_y);
          __m128i res_b_round0 = _mm_sll_epi32(res_b0, round_shift_bits);
          res_b_round0 = _mm_sra_epi32(
              _mm_add_epi32(res_b_round0, round_const_y), round_shift_y);

          const __m128i res_b1 = convolve(s + 4 + 8, coeffs_y);
          __m128i res_b_round1 = _mm_sll_epi32(res_b1, round_shift_bits);
          res_b_round1 = _mm_sra_epi32(
              _mm_add_epi32(res_b_round1, round_const_y), round_shift_y);

          __m128i res_unsigned_hi_0 = _mm_add_epi32(res_b_round0, offset_const);
          __m128i res_unsigned_hi_1 = _mm_add_epi32(res_b_round1, offset_const);

          if (do_average) {
            const __m128i data_0 =
                _mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j]));
            const __m128i data_1 = _mm_loadu_si128(
                (__m128i *)(&dst[i * dst_stride + j + dst_stride]));
            const __m128i data_ref_0_lo_0 = _mm_unpacklo_epi16(data_0, zero);
            const __m128i data_ref_0_lo_1 = _mm_unpacklo_epi16(data_1, zero);

            const __m128i data_ref_0_hi_0 = _mm_unpackhi_epi16(data_0, zero);
            const __m128i data_ref_0_hi_1 = _mm_unpackhi_epi16(data_1, zero);

            const __m128i comp_avg_res_lo_0 =
                highbd_comp_avg_sse4_1(&data_ref_0_lo_0, &res_unsigned_lo_0,
                                       &wt0, &wt1, use_dist_wtd_comp_avg);
            const __m128i comp_avg_res_lo_1 =
                highbd_comp_avg_sse4_1(&data_ref_0_lo_1, &res_unsigned_lo_1,
                                       &wt0, &wt1, use_dist_wtd_comp_avg);
            const __m128i comp_avg_res_hi_0 =
                highbd_comp_avg_sse4_1(&data_ref_0_hi_0, &res_unsigned_hi_0,
                                       &wt0, &wt1, use_dist_wtd_comp_avg);
            const __m128i comp_avg_res_hi_1 =
                highbd_comp_avg_sse4_1(&data_ref_0_hi_1, &res_unsigned_hi_1,
                                       &wt0, &wt1, use_dist_wtd_comp_avg);

            const __m128i round_result_lo_0 =
                highbd_convolve_rounding_sse2(&comp_avg_res_lo_0, &offset_const,
                                              &rounding_const, rounding_shift);
            const __m128i round_result_lo_1 =
                highbd_convolve_rounding_sse2(&comp_avg_res_lo_1, &offset_const,
                                              &rounding_const, rounding_shift);
            const __m128i round_result_hi_0 =
                highbd_convolve_rounding_sse2(&comp_avg_res_hi_0, &offset_const,
                                              &rounding_const, rounding_shift);
            const __m128i round_result_hi_1 =
                highbd_convolve_rounding_sse2(&comp_avg_res_hi_1, &offset_const,
                                              &rounding_const, rounding_shift);

            const __m128i res_16b_0 =
                _mm_packus_epi32(round_result_lo_0, round_result_hi_0);
            const __m128i res_clip_0 =
                _mm_min_epi16(res_16b_0, clip_pixel_to_bd);

            const __m128i res_16b_1 =
                _mm_packus_epi32(round_result_lo_1, round_result_hi_1);
            const __m128i res_clip_1 =
                _mm_min_epi16(res_16b_1, clip_pixel_to_bd);

            _mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]),
                            res_clip_0);
            _mm_store_si128(
                (__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]),
                res_clip_1);
          } else {
            __m128i res_16bit0 =
                _mm_packus_epi32(res_unsigned_lo_0, res_unsigned_hi_0);
            __m128i res_16bit1 =
                _mm_packus_epi32(res_unsigned_lo_1, res_unsigned_hi_1);
            _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_16bit0);
            _mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
                            res_16bit1);
          }
        }
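        // Slide the unpacked-row window down by one source row for the next
        // pair of output rows; s6 keeps the newest raw row (s8) so it can be
        // paired with the s7 loaded in the next iteration.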
        s[0] = s[1];
        s[1] = s[2];
        s[2] = s[3];

        s[4] = s[5];
        s[5] = s[6];
        s[6] = s[7];

        s[0 + 8] = s[1 + 8];
        s[1 + 8] = s[2 + 8];
        s[2 + 8] = s[3 + 8];

        s[4 + 8] = s[5 + 8];
        s[5 + 8] = s[6 + 8];
        s[6 + 8] = s[7 + 8];

        s6 = s8;
      }
    }
  }
}

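// Horizontal pass of the high bit-depth, distance-weighted compound
// convolution. Each row is filtered 8 output pixels at a time; as in the
// vertical function, the result is either stored to conv_params->dst as an
// offset intermediate value or blended with conv_params->dst, rounded,
// clipped, and written to dst0.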
void av1_highbd_dist_wtd_convolve_x_sse4_1(
    const uint16_t *src, int src_stride, uint16_t *dst0, int dst_stride0, int w,
    int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
    ConvolveParams *conv_params, int bd) {
  CONV_BUF_TYPE *dst = conv_params->dst;
  int dst_stride = conv_params->dst_stride;
  const int fo_horiz = filter_params_x->taps / 2 - 1;
  const uint16_t *const src_ptr = src - fo_horiz;
  const int bits = FILTER_BITS - conv_params->round_1;

  int i, j;
  __m128i s[4], coeffs_x[4];

  const int do_average = conv_params->do_average;
  const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
  const int w0 = conv_params->fwd_offset;
  const int w1 = conv_params->bck_offset;
  const __m128i wt0 = _mm_set1_epi32(w0);
  const __m128i wt1 = _mm_set1_epi32(w1);
  const __m128i zero = _mm_setzero_si128();

  const __m128i round_const_x =
      _mm_set1_epi32(((1 << conv_params->round_0) >> 1));
  const __m128i round_shift_x = _mm_cvtsi32_si128(conv_params->round_0);
  const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);

  const int offset_0 =
      bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
  const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
  const __m128i offset_const = _mm_set1_epi32(offset);
  const int rounding_shift =
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
  const __m128i rounding_const = _mm_set1_epi32((1 << rounding_shift) >> 1);
  const __m128i clip_pixel_to_bd =
      _mm_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));

  assert(bits >= 0);
  prepare_coeffs(filter_params_x, subpel_x_qn, coeffs_x);

  for (j = 0; j < w; j += 8) {
    /* Horizontal filter */
    for (i = 0; i < h; i += 1) {
      const __m128i row00 =
          _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
      const __m128i row01 =
          _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + (j + 8)]);

      // even pixels
      s[0] = _mm_alignr_epi8(row01, row00, 0);
      s[1] = _mm_alignr_epi8(row01, row00, 4);
      s[2] = _mm_alignr_epi8(row01, row00, 8);
      s[3] = _mm_alignr_epi8(row01, row00, 12);

      __m128i res_even = convolve(s, coeffs_x);
      res_even =
          _mm_sra_epi32(_mm_add_epi32(res_even, round_const_x), round_shift_x);

      // odd pixels
      s[0] = _mm_alignr_epi8(row01, row00, 2);
      s[1] = _mm_alignr_epi8(row01, row00, 6);
      s[2] = _mm_alignr_epi8(row01, row00, 10);
      s[3] = _mm_alignr_epi8(row01, row00, 14);

      __m128i res_odd = convolve(s, coeffs_x);
      res_odd =
          _mm_sra_epi32(_mm_add_epi32(res_odd, round_const_x), round_shift_x);

      res_even = _mm_sll_epi32(res_even, round_shift_bits);
      res_odd = _mm_sll_epi32(res_odd, round_shift_bits);

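      // Interleave the even and odd results back into pixel order: the low
      // half gives output columns 0-3, the high half columns 4-7.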
      __m128i res1 = _mm_unpacklo_epi32(res_even, res_odd);
      __m128i res_unsigned_lo = _mm_add_epi32(res1, offset_const);
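      // Blocks narrower than 8 pixels (w == 4): only the low four results are
      // valid, so load and store 64 bits per row.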
      if (w - j < 8) {
        if (do_average) {
          const __m128i data_0 =
              _mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j]));
          const __m128i data_ref_0 = _mm_unpacklo_epi16(data_0, zero);

          const __m128i comp_avg_res = highbd_comp_avg_sse4_1(
              &data_ref_0, &res_unsigned_lo, &wt0, &wt1, use_dist_wtd_comp_avg);
          const __m128i round_result = highbd_convolve_rounding_sse2(
              &comp_avg_res, &offset_const, &rounding_const, rounding_shift);

          const __m128i res_16b = _mm_packus_epi32(round_result, round_result);
          const __m128i res_clip = _mm_min_epi16(res_16b, clip_pixel_to_bd);
          _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_clip);
        } else {
          __m128i res_16b = _mm_packus_epi32(res_unsigned_lo, res_unsigned_lo);
          _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_16b);
        }
      } else {
        __m128i res2 = _mm_unpackhi_epi32(res_even, res_odd);
        __m128i res_unsigned_hi = _mm_add_epi32(res2, offset_const);
        if (do_average) {
          const __m128i data_0 =
              _mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j]));
          const __m128i data_ref_0_lo = _mm_unpacklo_epi16(data_0, zero);
          const __m128i data_ref_0_hi = _mm_unpackhi_epi16(data_0, zero);

          const __m128i comp_avg_res_lo =
              highbd_comp_avg_sse4_1(&data_ref_0_lo, &res_unsigned_lo, &wt0,
                                     &wt1, use_dist_wtd_comp_avg);
          const __m128i comp_avg_res_hi =
              highbd_comp_avg_sse4_1(&data_ref_0_hi, &res_unsigned_hi, &wt0,
                                     &wt1, use_dist_wtd_comp_avg);

          const __m128i round_result_lo = highbd_convolve_rounding_sse2(
              &comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
          const __m128i round_result_hi = highbd_convolve_rounding_sse2(
              &comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);

          const __m128i res_16b =
              _mm_packus_epi32(round_result_lo, round_result_hi);
          const __m128i res_clip = _mm_min_epi16(res_16b, clip_pixel_to_bd);
          _mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_clip);
        } else {
          __m128i res_16b = _mm_packus_epi32(res_unsigned_lo, res_unsigned_hi);
          _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_16b);
        }
      }
    }
  }
}