/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <immintrin.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

#include "av1/common/x86/cfl_simd.h"

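// The AVX2 kernels in this file handle the wider block sizes; the dispatch
// tables below fall back to the SSSE3/SSE2 kernels declared in
// av1/common/x86/cfl_simd.h for the smaller ones.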
#define CFL_GET_SUBSAMPLE_FUNCTION_AVX2(sub, bd)                           \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 32)                                     \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 16)                                     \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 8)                                      \
  cfl_subsample_##bd##_fn cfl_get_luma_subsampling_##sub##_##bd##_avx2(    \
      TX_SIZE tx_size) {                                                   \
    static const cfl_subsample_##bd##_fn subfn_##sub[TX_SIZES_ALL] = {     \
      subsample_##bd##_##sub##_4x4_ssse3,   /* 4x4 */                      \
      subsample_##bd##_##sub##_8x8_ssse3,   /* 8x8 */                      \
      subsample_##bd##_##sub##_16x16_ssse3, /* 16x16 */                    \
      subsample_##bd##_##sub##_32x32_avx2,  /* 32x32 */                    \
      NULL,                                 /* 64x64 (invalid CFL size) */ \
      subsample_##bd##_##sub##_4x8_ssse3,   /* 4x8 */                      \
      subsample_##bd##_##sub##_8x4_ssse3,   /* 8x4 */                      \
      subsample_##bd##_##sub##_8x16_ssse3,  /* 8x16 */                     \
      subsample_##bd##_##sub##_16x8_ssse3,  /* 16x8 */                     \
      subsample_##bd##_##sub##_16x32_ssse3, /* 16x32 */                    \
      subsample_##bd##_##sub##_32x16_avx2,  /* 32x16 */                    \
      NULL,                                 /* 32x64 (invalid CFL size) */ \
      NULL,                                 /* 64x32 (invalid CFL size) */ \
      subsample_##bd##_##sub##_4x16_ssse3,  /* 4x16  */                    \
      subsample_##bd##_##sub##_16x4_ssse3,  /* 16x4  */                    \
      subsample_##bd##_##sub##_8x32_ssse3,  /* 8x32  */                    \
      subsample_##bd##_##sub##_32x8_avx2,   /* 32x8  */                    \
      NULL,                                 /* 16x64 (invalid CFL size) */ \
      NULL,                                 /* 64x16 (invalid CFL size) */ \
    };                                                                     \
    return subfn_##sub[tx_size];                                           \
  }

/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of a box filter 4:2:0 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 *
 * Note: For 4:2:0 luma subsampling, the width will never be greater than 16.
 */
static void cfl_luma_subsampling_420_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;                               // Forever 32
  const __m256i twos = _mm256_set1_epi8(2);  // Thirty two twos
  const int luma_stride = input_stride << 1;
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + (height >> 1) * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i bot = _mm256_loadu_si256((__m256i *)(input + input_stride));

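    // With a vector of 2s, _mm256_maddubs_epi16 multiplies each unsigned byte
    // by 2 and adds horizontally adjacent products, so every 16-bit lane holds
    // 2*(left + right). Summing the top and bottom rows then gives
    // 2*(a + b + c + d) per 2x2 group, i.e. the group average scaled by 8 (Q3).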
    __m256i top_16x16 = _mm256_maddubs_epi16(top, twos);
    __m256i bot_16x16 = _mm256_maddubs_epi16(bot, twos);
    __m256i sum_16x16 = _mm256_add_epi16(top_16x16, bot_16x16);

    _mm256_storeu_si256(row, sum_16x16);

    input += luma_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(420, lbd)

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of a box filter 4:2:2 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_422_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;                                // Forever 32
  const __m256i fours = _mm256_set1_epi8(4);  // Thirty two fours
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
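    // _mm256_maddubs_epi16 with a vector of 4s sums each horizontal pair of
    // bytes scaled by 4, so every 16-bit lane holds 4*(a + b): the average of
    // the two pixels scaled by 8 (Q3).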
    __m256i top_16x16 = _mm256_maddubs_epi16(top, fours);
    _mm256_storeu_si256(row, top_16x16);
    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(422, lbd)

/**
 * Multiplies the pixels by 8 (scaling in Q3). The AVX2 subsampling is only
 * performed on blocks of width 32.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_444_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  const __m256i zeros = _mm256_setzero_si256();
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
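    // Reorder the 64-bit lanes so that the in-lane unpacklo/unpackhi below
    // zero-extend pixels 0..15 into row_lo and pixels 16..31 into row_hi in
    // order; the left shift by 3 scales each pixel by 8 (Q3).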
    top = _mm256_permute4x64_epi64(top, _MM_SHUFFLE(3, 1, 2, 0));

    __m256i row_lo = _mm256_unpacklo_epi8(top, zeros);
    row_lo = _mm256_slli_epi16(row_lo, 3);
    __m256i row_hi = _mm256_unpackhi_epi8(top, zeros);
    row_hi = _mm256_slli_epi16(row_hi, 3);

    _mm256_storeu_si256(row, row_lo);
    _mm256_storeu_si256(row + 1, row_hi);

    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(444, lbd)

/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of a box filter 4:2:0 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 *
 * Note: For 4:2:0 luma subsampling, the width will never be greater than 16.
 */
static void cfl_luma_subsampling_420_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  const int luma_stride = input_stride << 1;
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + (height >> 1) * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i bot = _mm256_loadu_si256((__m256i *)(input + input_stride));
    __m256i sum = _mm256_add_epi16(top, bot);

    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
    __m256i bot_1 = _mm256_loadu_si256((__m256i *)(input + 16 + input_stride));
    __m256i sum_1 = _mm256_add_epi16(top_1, bot_1);

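    // hadd folds horizontal pairs of the two vertical sums, the permute
    // restores ascending pixel order across the 128-bit lanes, and adding the
    // result to itself doubles it, giving 2*(a + b + c + d) per 2x2 group (Q3).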
    __m256i hsum = _mm256_hadd_epi16(sum, sum_1);
    hsum = _mm256_permute4x64_epi64(hsum, _MM_SHUFFLE(3, 1, 2, 0));
    hsum = _mm256_add_epi16(hsum, hsum);

    _mm256_storeu_si256(row, hsum);

    input += luma_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(420, hbd)

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of a box filter 4:2:2 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_422_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
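    // hadd folds horizontal pairs, the permute restores ascending pixel order
    // across the 128-bit lanes, and the shift by 2 yields 4*(a + b): the pair
    // average scaled by 8 (Q3).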
    __m256i hsum = _mm256_hadd_epi16(top, top_1);
    hsum = _mm256_permute4x64_epi64(hsum, _MM_SHUFFLE(3, 1, 2, 0));
    hsum = _mm256_slli_epi16(hsum, 2);

    _mm256_storeu_si256(row, hsum);

    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(422, hbd)

static void cfl_luma_subsampling_444_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
    _mm256_storeu_si256(row, _mm256_slli_epi16(top, 3));
    _mm256_storeu_si256(row + 1, _mm256_slli_epi16(top_1, 3));
    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(444, hbd)

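// Scales the Q3 AC value by alpha and adds the DC value: alpha_q3 is promoted
// to Q12 so that _mm256_mulhrs_epi16 (which returns round(x * y / 2^15))
// brings the product back to pixel units. The multiply is done on magnitudes;
// the combined sign of ac and alpha is reapplied with _mm256_sign_epi16.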
static INLINE __m256i predict_unclipped(const __m256i *input, __m256i alpha_q12,
                                        __m256i alpha_sign, __m256i dc_q0) {
  __m256i ac_q3 = _mm256_loadu_si256(input);
  __m256i ac_sign = _mm256_sign_epi16(alpha_sign, ac_q3);
  __m256i scaled_luma_q0 =
      _mm256_mulhrs_epi16(_mm256_abs_epi16(ac_q3), alpha_q12);
  scaled_luma_q0 = _mm256_sign_epi16(scaled_luma_q0, ac_sign);
  return _mm256_add_epi16(scaled_luma_q0, dc_q0);
}

static INLINE void cfl_predict_lbd_avx2(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  (void)width;
  const __m256i alpha_sign = _mm256_set1_epi16(alpha_q3);
  const __m256i alpha_q12 = _mm256_slli_epi16(_mm256_abs_epi16(alpha_sign), 9);
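  // dst has already been filled with the DC prediction, so its first pixel
  // holds the DC value to which the scaled AC contribution is added.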
  const __m256i dc_q0 = _mm256_set1_epi16(*dst);
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;

  do {
    __m256i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    __m256i next = predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
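    // packus saturates both 16-pixel results to 8 bits but interleaves the
    // 128-bit lanes, so the permute restores ascending pixel order before the
    // 32-byte store.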
    res = _mm256_packus_epi16(res, next);
    res = _mm256_permute4x64_epi64(res, _MM_SHUFFLE(3, 1, 2, 0));
    _mm256_storeu_si256((__m256i *)dst, res);
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_PREDICT_X(avx2, 32, 8, lbd);
CFL_PREDICT_X(avx2, 32, 16, lbd);
CFL_PREDICT_X(avx2, 32, 32, lbd);

cfl_predict_lbd_fn get_predict_lbd_fn_avx2(TX_SIZE tx_size) {
  static const cfl_predict_lbd_fn pred[TX_SIZES_ALL] = {
    predict_lbd_4x4_ssse3,   /* 4x4 */
    predict_lbd_8x8_ssse3,   /* 8x8 */
    predict_lbd_16x16_ssse3, /* 16x16 */
    predict_lbd_32x32_avx2,  /* 32x32 */
    NULL,                    /* 64x64 (invalid CFL size) */
    predict_lbd_4x8_ssse3,   /* 4x8 */
    predict_lbd_8x4_ssse3,   /* 8x4 */
    predict_lbd_8x16_ssse3,  /* 8x16 */
    predict_lbd_16x8_ssse3,  /* 16x8 */
    predict_lbd_16x32_ssse3, /* 16x32 */
    predict_lbd_32x16_avx2,  /* 32x16 */
    NULL,                    /* 32x64 (invalid CFL size) */
    NULL,                    /* 64x32 (invalid CFL size) */
    predict_lbd_4x16_ssse3,  /* 4x16  */
    predict_lbd_16x4_ssse3,  /* 16x4  */
    predict_lbd_8x32_ssse3,  /* 8x32  */
    predict_lbd_32x8_avx2,   /* 32x8  */
    NULL,                    /* 16x64 (invalid CFL size) */
    NULL,                    /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to index the
  // function pointer array out of bounds.
  return pred[tx_size % TX_SIZES_ALL];
}

static __m256i highbd_max_epi16(int bd) {
  const __m256i neg_one = _mm256_set1_epi16(-1);
  // (1 << bd) - 1 => -(-1 << bd) -1 => -1 - (-1 << bd) => -1 ^ (-1 << bd)
  return _mm256_xor_si256(_mm256_slli_epi16(neg_one, bd), neg_one);
}

static __m256i highbd_clamp_epi16(__m256i u, __m256i zero, __m256i max) {
  return _mm256_max_epi16(_mm256_min_epi16(u, max), zero);
}

static INLINE void cfl_predict_hbd_avx2(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  // Use SSSE3 version for smaller widths
  assert(width == 16 || width == 32);
  const __m256i alpha_sign = _mm256_set1_epi16(alpha_q3);
  const __m256i alpha_q12 = _mm256_slli_epi16(_mm256_abs_epi16(alpha_sign), 9);
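  // dst has already been filled with the DC prediction, so the first 16 pixels
  // give a vector of DC values.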
  const __m256i dc_q0 = _mm256_loadu_si256((__m256i *)dst);
  const __m256i max = highbd_max_epi16(bd);

  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    const __m256i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    _mm256_storeu_si256((__m256i *)dst,
                        highbd_clamp_epi16(res, _mm256_setzero_si256(), max));
    if (width == 32) {
      const __m256i res_1 =
          predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
      _mm256_storeu_si256(
          (__m256i *)(dst + 16),
          highbd_clamp_epi16(res_1, _mm256_setzero_si256(), max));
    }
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_PREDICT_X(avx2, 16, 4, hbd)
CFL_PREDICT_X(avx2, 16, 8, hbd)
CFL_PREDICT_X(avx2, 16, 16, hbd)
CFL_PREDICT_X(avx2, 16, 32, hbd)
CFL_PREDICT_X(avx2, 32, 8, hbd)
CFL_PREDICT_X(avx2, 32, 16, hbd)
CFL_PREDICT_X(avx2, 32, 32, hbd)

cfl_predict_hbd_fn get_predict_hbd_fn_avx2(TX_SIZE tx_size) {
  static const cfl_predict_hbd_fn pred[TX_SIZES_ALL] = {
    predict_hbd_4x4_ssse3,  /* 4x4 */
    predict_hbd_8x8_ssse3,  /* 8x8 */
    predict_hbd_16x16_avx2, /* 16x16 */
    predict_hbd_32x32_avx2, /* 32x32 */
    NULL,                   /* 64x64 (invalid CFL size) */
    predict_hbd_4x8_ssse3,  /* 4x8 */
    predict_hbd_8x4_ssse3,  /* 8x4 */
    predict_hbd_8x16_ssse3, /* 8x16 */
    predict_hbd_16x8_avx2,  /* 16x8 */
    predict_hbd_16x32_avx2, /* 16x32 */
    predict_hbd_32x16_avx2, /* 32x16 */
    NULL,                   /* 32x64 (invalid CFL size) */
    NULL,                   /* 64x32 (invalid CFL size) */
    predict_hbd_4x16_ssse3, /* 4x16  */
    predict_hbd_16x4_avx2,  /* 16x4  */
    predict_hbd_8x32_ssse3, /* 8x32  */
    predict_hbd_32x8_avx2,  /* 32x8  */
    NULL,                   /* 16x64 (invalid CFL size) */
    NULL,                   /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to index the
  // function pointer array out of bounds.
  return pred[tx_size % TX_SIZES_ALL];
}

// Returns a vector where all the 32-bit elements are the sum of all the
// lanes in a.
static INLINE __m256i fill_sum_epi32(__m256i a) {
  // Given that a == [A, B, C, D, E, F, G, H]
  a = _mm256_hadd_epi32(a, a);
  // Given that A' == A + B, C' == C + D, E' == E + F, G' == G + H
  // a == [A', C', A', C', E', G', E', G']
  a = _mm256_permute4x64_epi64(a, _MM_SHUFFLE(3, 1, 2, 0));
  // a == [A', C', E', G', A', C', E', G']
  a = _mm256_hadd_epi32(a, a);
  // Given that A'' == A' + C' and E'' == E' + G'
  // a == [A'', E'', A'', E'', A'', E'', A'', E'']
  return _mm256_hadd_epi32(a, a);
  // Given that A''' == A'' + E''
  // a == [A''', A''', A''', A''', A''', A''', A''', A''']
}

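// Zero-extends the sixteen 16-bit lanes of a to 32 bits and adds the low half
// of each 128-bit lane to its high half. The element order changes, but the
// overall sum is preserved, which is all the accumulator below needs.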
static INLINE __m256i _mm256_addl_epi16(__m256i a) {
  return _mm256_add_epi32(_mm256_unpacklo_epi16(a, _mm256_setzero_si256()),
                          _mm256_unpackhi_epi16(a, _mm256_setzero_si256()));
}

static INLINE void subtract_average_avx2(const uint16_t *src_ptr,
                                         int16_t *dst_ptr, int width,
                                         int height, int round_offset,
                                         int num_pel_log2) {
  // Use SSE2 version for smaller widths
  assert(width == 16 || width == 32);

  const __m256i *src = (__m256i *)src_ptr;
  const __m256i *const end = src + height * CFL_BUF_LINE_I256;
  // To maximize usage of the AVX2 registers, we sum two rows per loop
  // iteration
  const int step = 2 * CFL_BUF_LINE_I256;

  __m256i sum = _mm256_setzero_si256();
  // For width 32, we use a second sum accumulator to reduce accumulator
  // dependencies in the loop.
  __m256i sum2;
  if (width == 32) sum2 = _mm256_setzero_si256();

  do {
    // Add top row to the bottom row
    __m256i l0 = _mm256_add_epi16(_mm256_loadu_si256(src),
                                  _mm256_loadu_si256(src + CFL_BUF_LINE_I256));
    sum = _mm256_add_epi32(sum, _mm256_addl_epi16(l0));
    if (width == 32) { /* Don't worry, this `if` gets optimized out. */
      // Add the second part of the top row to the second part of the bottom row
      __m256i l1 =
          _mm256_add_epi16(_mm256_loadu_si256(src + 1),
                           _mm256_loadu_si256(src + 1 + CFL_BUF_LINE_I256));
      sum2 = _mm256_add_epi32(sum2, _mm256_addl_epi16(l1));
    }
    src += step;
  } while (src < end);
  // Combine both sum accumulators
  if (width == 32) sum = _mm256_add_epi32(sum, sum2);

  __m256i fill = fill_sum_epi32(sum);

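  // Every 32-bit lane of fill now holds the sum of all active pixels; add the
  // rounding offset, shift by log2(num_pel) to get the average, and replicate
  // it as 16-bit lanes for the subtraction loop below.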
  __m256i avg_epi16 = _mm256_srli_epi32(
      _mm256_add_epi32(fill, _mm256_set1_epi32(round_offset)), num_pel_log2);
  avg_epi16 = _mm256_packs_epi32(avg_epi16, avg_epi16);

  // Store and subtract loop
  src = (__m256i *)src_ptr;
  __m256i *dst = (__m256i *)dst_ptr;
  do {
    _mm256_storeu_si256(dst,
                        _mm256_sub_epi16(_mm256_loadu_si256(src), avg_epi16));
    if (width == 32) {
      _mm256_storeu_si256(
          dst + 1, _mm256_sub_epi16(_mm256_loadu_si256(src + 1), avg_epi16));
    }
    src += CFL_BUF_LINE_I256;
    dst += CFL_BUF_LINE_I256;
  } while (src < end);
}

// Declare wrappers for AVX2 sizes
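// The last two macro arguments are round_offset (= num_pel / 2) and
// num_pel_log2 (= log2(width * height)) used by subtract_average_avx2 above.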
CFL_SUB_AVG_X(avx2, 16, 4, 32, 6)
CFL_SUB_AVG_X(avx2, 16, 8, 64, 7)
CFL_SUB_AVG_X(avx2, 16, 16, 128, 8)
CFL_SUB_AVG_X(avx2, 16, 32, 256, 9)
CFL_SUB_AVG_X(avx2, 32, 8, 128, 8)
CFL_SUB_AVG_X(avx2, 32, 16, 256, 9)
CFL_SUB_AVG_X(avx2, 32, 32, 512, 10)

// Based on the observation that for small blocks AVX2 does not outperform
// SSE2, we call the SSE2 code for block widths 4 and 8.
cfl_subtract_average_fn get_subtract_average_fn_avx2(TX_SIZE tx_size) {
  static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = {
    subtract_average_4x4_sse2,   /* 4x4 */
    subtract_average_8x8_sse2,   /* 8x8 */
    subtract_average_16x16_avx2, /* 16x16 */
    subtract_average_32x32_avx2, /* 32x32 */
    NULL,                        /* 64x64 (invalid CFL size) */
    subtract_average_4x8_sse2,   /* 4x8 */
    subtract_average_8x4_sse2,   /* 8x4 */
    subtract_average_8x16_sse2,  /* 8x16 */
    subtract_average_16x8_avx2,  /* 16x8 */
    subtract_average_16x32_avx2, /* 16x32 */
    subtract_average_32x16_avx2, /* 32x16 */
    NULL,                        /* 32x64 (invalid CFL size) */
    NULL,                        /* 64x32 (invalid CFL size) */
    subtract_average_4x16_sse2,  /* 4x16 */
    subtract_average_16x4_avx2,  /* 16x4 */
    subtract_average_8x32_sse2,  /* 8x32 */
    subtract_average_32x8_avx2,  /* 32x8 */
    NULL,                        /* 16x64 (invalid CFL size) */
    NULL,                        /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to
  // index the function pointer array out of bounds.
  return sub_avg[tx_size % TX_SIZES_ALL];
}
492