/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "config/av1_rtcd.h"

#include "aom_dsp/x86/convolve_avx2.h"
#include "aom_dsp/x86/convolve_common_intrin.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/synonyms.h"
#include "av1/common/convolve.h"

void av1_convolve_2d_sr_avx2(const uint8_t *src, int src_stride, uint8_t *dst,
                             int dst_stride, int w, int h,
                             const InterpFilterParams *filter_params_x,
                             const InterpFilterParams *filter_params_y,
                             const int subpel_x_q4, const int subpel_y_q4,
                             ConvolveParams *conv_params) {
  const int bd = 8;
  int im_stride = 8;
  int i, is_horiz_4tap = 0, is_vert_4tap = 0;
  DECLARE_ALIGNED(32, int16_t, im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * 8]);
  const int bits =
      FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1;
  const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;

  assert(conv_params->round_0 > 0);

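  // Horizontal-stage rounding. prepare_coeffs_lowbd() halves the (all even)
  // filter taps to keep the 8-bit multiply-add in range, so the horizontal
  // shift is round_0 - 1 and both the rounding bias and the
  // (1 << (bd + FILTER_BITS - 2)) compound offset are halved to match.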
  const __m256i round_const_h = _mm256_set1_epi16(
      ((1 << (conv_params->round_0 - 1)) >> 1) + (1 << (bd + FILTER_BITS - 2)));
  const __m128i round_shift_h = _mm_cvtsi32_si128(conv_params->round_0 - 1);

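  // Vertical-stage rounding. sum_round_v adds the round_1 rounding bias plus
  // a (1 << offset_bits) offset; after the shift by round_1, round_const_v
  // removes the accumulated compound offset and adds the bias for the final
  // shift by `bits` down to the 8-bit output range.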
  const __m256i sum_round_v = _mm256_set1_epi32(
      (1 << offset_bits) + ((1 << conv_params->round_1) >> 1));
  const __m128i sum_shift_v = _mm_cvtsi32_si128(conv_params->round_1);

  const __m256i round_const_v = _mm256_set1_epi32(
      ((1 << bits) >> 1) - (1 << (offset_bits - conv_params->round_1)) -
      ((1 << (offset_bits - conv_params->round_1)) >> 1));
  const __m128i round_shift_v = _mm_cvtsi32_si128(bits);

  __m256i filt[4], coeffs_h[4], coeffs_v[4];

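  // filt[] holds the byte-shuffle masks used by the horizontal filters to
  // gather input pixels; coeffs_h receives the x-filter taps packed for the
  // 8-bit multiply-add path and coeffs_v the y-filter taps as 16-bit pairs.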
  filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
  filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));

  prepare_coeffs_lowbd(filter_params_x, subpel_x_q4, coeffs_h);
  prepare_coeffs(filter_params_y, subpel_y_q4, coeffs_v);

  // The horizontal filter is effectively 4-tap when its outer taps are zero.
  if (!(_mm256_extract_epi32(_mm256_or_si256(coeffs_h[0], coeffs_h[3]), 0)))
    is_horiz_4tap = 1;

  // The vertical filter is effectively 4-tap when its outer taps are zero.
  if (!(_mm256_extract_epi32(_mm256_or_si256(coeffs_v[0], coeffs_v[3]), 0)))
    is_vert_4tap = 1;

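  // Three specializations follow: 4-tap horizontal (8-tap vertical path),
  // 8-tap horizontal with 4-tap vertical, and the general 8-tap/8-tap path.
  // In each of them the horizontal pass writes 16-bit intermediates to
  // im_block and the vertical pass reads im_block and writes 8-bit output.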
  // 4-tap horizontal filter; the vertical filter runs through the 8-tap path.
  if (is_horiz_4tap) {
    int im_h = h + filter_params_y->taps - 1;
    const int fo_vert = filter_params_y->taps / 2 - 1;
    const int fo_horiz = 1;
    const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

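    // Process the block in 8-pixel-wide columns: filter im_h input rows
    // horizontally into im_block (two rows per iteration, one per 128-bit
    // lane), then run the 8-tap vertical filter over im_block.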
    // horz-filter
    for (int j = 0; j < w; j += 8) {
      for (i = 0; i < (im_h - 2); i += 2) {
        __m256i data = _mm256_castsi128_si256(
            _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));

        // Load the next line
        data = _mm256_inserti128_si256(
            data,
            _mm_loadu_si128(
                (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),
            1);
        __m256i res = convolve_lowbd_x_4tap(data, coeffs_h + 1, filt);

        res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h),
                               round_shift_h);
        _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);
      }

      __m256i data_1 = _mm256_castsi128_si256(
          _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));

      __m256i res = convolve_lowbd_x_4tap(data_1, coeffs_h + 1, filt);
      res =
          _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h);
      _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);

      // vert filter
      CONVOLVE_SR_VERTICAL_FILTER_8TAP;
    }
  } else if (is_vert_4tap) {
    int im_h = h + 3;
    const int fo_vert = 1;
    const int fo_horiz = filter_params_x->taps / 2 - 1;
    const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

    filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
    filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));

    for (int j = 0; j < w; j += 8) {
      // horz_filter
      CONVOLVE_SR_HORIZONTAL_FILTER_8TAP;
      // vert_filter
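      // 4-tap vertical filter over im_block. s[0..2] hold the interleaved
      // low halves (pixels 0..3) of adjacent row pairs and s[3..5] the high
      // halves (pixels 4..7); the loop below slides this window down two
      // rows at a time and produces two output rows per iteration.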
      __m256i s[6];
      __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));
      __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));
      __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));
      __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));

      s[0] = _mm256_unpacklo_epi16(src_0, src_1);
      s[1] = _mm256_unpacklo_epi16(src_2, src_3);
      s[3] = _mm256_unpackhi_epi16(src_0, src_1);
      s[4] = _mm256_unpackhi_epi16(src_2, src_3);

      for (i = 0; i < h; i += 2) {
        const int16_t *data = &im_block[i * im_stride];

        const __m256i s4 =
            _mm256_loadu_si256((__m256i *)(data + 4 * im_stride));
        const __m256i s5 =
            _mm256_loadu_si256((__m256i *)(data + 5 * im_stride));

        s[2] = _mm256_unpacklo_epi16(s4, s5);
        s[5] = _mm256_unpackhi_epi16(s4, s5);

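        // Filter the low and high pixel halves; coeffs_v + 1 skips the outer
        // tap pairs, which are zero for a 4-tap filter.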
        __m256i res_a = convolve_4tap(s, coeffs_v + 1);
        __m256i res_b = convolve_4tap(s + 3, coeffs_v + 1);

        // Combine V round and 2F-H-V round into a single rounding
        res_a =
            _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);
        res_b =
            _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);

        const __m256i res_a_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_a, round_const_v), round_shift_v);
        const __m256i res_b_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_b, round_const_v), round_shift_v);

        /* rounding code */
        // 16 bit conversion
        const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
        // 8 bit conversion and saturation to uint8
        const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);

        const __m128i res_0 = _mm256_castsi256_si128(res_8b);
        const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);

        // Store 8, 4, or 2 pixels per row depending on the remaining width
        __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
        __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];
        if (w - j > 4) {
          _mm_storel_epi64(p_0, res_0);
          _mm_storel_epi64(p_1, res_1);
        } else if (w == 4) {
          xx_storel_32(p_0, res_0);
          xx_storel_32(p_1, res_1);
        } else {
          *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
          *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
        }

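        // Slide the interleaved-row window down by two rows for the next
        // iteration.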
        s[0] = s[1];
        s[1] = s[2];
        s[3] = s[4];
        s[4] = s[5];
      }
    }
  } else {
    int j;
    int im_h = h + filter_params_y->taps - 1;
    const int fo_vert = filter_params_y->taps / 2 - 1;
    const int fo_horiz = filter_params_x->taps / 2 - 1;
    const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

    filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
    filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));

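    // General 8-tap/8-tap case: filter each 8-pixel-wide column horizontally
    // into im_block, then vertically into dst.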
    for (j = 0; j < w; j += 8) {
      CONVOLVE_SR_HORIZONTAL_FILTER_8TAP;

      CONVOLVE_SR_VERTICAL_FILTER_8TAP;
    }
  }
}

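// Copy one 128-pixel-wide row using four 32-byte AVX2 loads and stores.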
static INLINE void copy_128(const uint8_t *src, uint8_t *dst) {
  __m256i s[4];
  s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
  s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
  s[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 32));
  s[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 32));
  _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);
  _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);
  _mm256_storeu_si256((__m256i *)(dst + 2 * 32), s[2]);
  _mm256_storeu_si256((__m256i *)(dst + 3 * 32), s[3]);
}

void av1_convolve_2d_copy_sr_avx2(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int dst_stride, int w, int h,
                                  const InterpFilterParams *filter_params_x,
                                  const InterpFilterParams *filter_params_y,
                                  const int subpel_x_q4, const int subpel_y_q4,
                                  ConvolveParams *conv_params) {
  (void)filter_params_x;
  (void)filter_params_y;
  (void)subpel_x_q4;
  (void)subpel_y_q4;
  (void)conv_params;

  if (w >= 16) {
    assert(!((intptr_t)dst % 16));
    assert(!(dst_stride % 16));
  }

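  // Each branch copies two rows per iteration with the widest load/store
  // available for that block width; 2- and 4-pixel rows fall back to memmove.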
  if (w == 2) {
    do {
      memmove(dst, src, 2 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      memmove(dst, src, 2 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 4) {
    do {
      memmove(dst, src, 4 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      memmove(dst, src, 4 * sizeof(*src));
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 8) {
    do {
      __m128i s[2];
      s[0] = _mm_loadl_epi64((__m128i *)src);
      src += src_stride;
      s[1] = _mm_loadl_epi64((__m128i *)src);
      src += src_stride;
      _mm_storel_epi64((__m128i *)dst, s[0]);
      dst += dst_stride;
      _mm_storel_epi64((__m128i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 16) {
    do {
      __m128i s[2];
      s[0] = _mm_loadu_si128((__m128i *)src);
      src += src_stride;
      s[1] = _mm_loadu_si128((__m128i *)src);
      src += src_stride;
      _mm_store_si128((__m128i *)dst, s[0]);
      dst += dst_stride;
      _mm_store_si128((__m128i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 32) {
    do {
      __m256i s[2];
      s[0] = _mm256_loadu_si256((__m256i *)src);
      src += src_stride;
      s[1] = _mm256_loadu_si256((__m256i *)src);
      src += src_stride;
      _mm256_storeu_si256((__m256i *)dst, s[0]);
      dst += dst_stride;
      _mm256_storeu_si256((__m256i *)dst, s[1]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else if (w == 64) {
    do {
      __m256i s[4];
      s[0] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
      s[1] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
      src += src_stride;
      s[2] = _mm256_loadu_si256((__m256i *)(src + 0 * 32));
      s[3] = _mm256_loadu_si256((__m256i *)(src + 1 * 32));
      src += src_stride;
      _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);
      _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);
      dst += dst_stride;
      _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[2]);
      _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[3]);
      dst += dst_stride;
      h -= 2;
    } while (h);
  } else {
    do {
      copy_128(src, dst);
      src += src_stride;
      dst += dst_stride;
      copy_128(src, dst);
      src += src_stride;
      dst += dst_stride;
      h -= 2;
    } while (h);
  }
}