/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/restoration.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"

// Load 8 bytes from the possibly-misaligned pointer p, extend each byte to
// 32-bit precision and return them in an AVX2 register.
static __m256i yy256_load_extend_8_32(const void *p) {
  return _mm256_cvtepu8_epi32(xx_loadl_64(p));
}

// Load 8 halfwords from the possibly-misaligned pointer p, extend each
// halfword to 32-bit precision and return them in an AVX2 register.
static __m256i yy256_load_extend_16_32(const void *p) {
  return _mm256_cvtepu16_epi32(xx_loadu_128(p));
}
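
// For example, if p points at the bytes {b0, b1, ..., b7}, then
// yy256_load_extend_8_32(p) returns the eight 32-bit lanes {b0, b1, ..., b7}
// with b0 in the lowest lane; the 16-bit variant does the same for halfwords.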

// Compute the scan of an AVX2 register holding 8 32-bit integers. If the
// register holds x0..x7 then the scan will hold x0, x0+x1, x0+x1+x2, ...,
// x0+x1+...+x7
//
// Let [...] represent a 128-bit block, and let a, ..., h be 32-bit integers
// (assumed small enough to be able to add them without overflow).
//
// Use -> as shorthand for summing, i.e. h->a = h + g + f + e + d + c + b + a.
//
// x   = [h g f e][d c b a]
// x01 = [g f e 0][c b a 0]
// x02 = [g+h f+g e+f e][c+d b+c a+b a]
// x03 = [e+f e 0 0][a+b a 0 0]
// x04 = [e->h e->g e->f e][a->d a->c a->b a]
// s   = a->d
// s01 = [a->d a->d a->d a->d]
// s02 = [a->d a->d a->d a->d][0 0 0 0]
// ret = [a->h a->g a->f a->e][a->d a->c a->b a]
static __m256i scan_32(__m256i x) {
  const __m256i x01 = _mm256_slli_si256(x, 4);
  const __m256i x02 = _mm256_add_epi32(x, x01);
  const __m256i x03 = _mm256_slli_si256(x02, 8);
  const __m256i x04 = _mm256_add_epi32(x02, x03);
  const int32_t s = _mm256_extract_epi32(x04, 3);
  const __m128i s01 = _mm_set1_epi32(s);
  const __m256i s02 = _mm256_insertf128_si256(_mm256_setzero_si256(), s01, 1);
  return _mm256_add_epi32(x04, s02);
}
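
// For reference, an illustrative scalar equivalent of scan_32 (not part of
// the build): an inclusive prefix sum over the eight 32-bit lanes.
#if 0
static void scan_32_scalar(const int32_t in[8], int32_t out[8]) {
  int32_t acc = 0;
  for (int k = 0; k < 8; ++k) {
    acc += in[k];  // acc = in[0] + ... + in[k]
    out[k] = acc;
  }
}
#endif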

// Compute two integral images from src. B sums elements; A sums their
// squares. The images are offset by one pixel, so they have dimensions
// (width + 1) x (height + 1), and their first row and column are zero.
//
// A+1 and B+1 should be aligned to 32 bytes. buf_stride should be a multiple
// of 8.

static void *memset_zero_avx(int32_t *dest, const __m256i *zero,
                             size_t count) {
  unsigned int i = 0;
  for (i = 0; i < (count & 0xffffffe0); i += 32) {
    _mm256_storeu_si256((__m256i *)(dest + i), *zero);
    _mm256_storeu_si256((__m256i *)(dest + i + 8), *zero);
    _mm256_storeu_si256((__m256i *)(dest + i + 16), *zero);
    _mm256_storeu_si256((__m256i *)(dest + i + 24), *zero);
  }
  for (; i < (count & 0xfffffff8); i += 8) {
    _mm256_storeu_si256((__m256i *)(dest + i), *zero);
  }
  for (; i < count; i++) {
    dest[i] = 0;
  }
  return dest;
}

static void integral_images(const uint8_t *src, int src_stride, int width,
                            int height, int32_t *A, int32_t *B,
                            int buf_stride) {
  const __m256i zero = _mm256_setzero_si256();
  // Write out the zero top row
  memset_zero_avx(A, &zero, (width + 8));
  memset_zero_avx(B, &zero, (width + 8));
  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the eight lanes.
    __m256i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 8) {
      const int ABj = 1 + j;

      const __m256i above1 = yy_load_256(B + ABj + i * buf_stride);
      const __m256i above2 = yy_load_256(A + ABj + i * buf_stride);

      const __m256i x1 = yy256_load_extend_8_32(src + j + i * src_stride);
      const __m256i x2 = _mm256_madd_epi16(x1, x1);

      const __m256i sc1 = scan_32(x1);
      const __m256i sc2 = scan_32(x2);

      const __m256i row1 =
          _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1);
      const __m256i row2 =
          _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2);

      yy_store_256(B + ABj + (i + 1) * buf_stride, row1);
      yy_store_256(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm256_set1_epi32(
          _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));
      ldiff2 = _mm256_set1_epi32(
          _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));
    }
  }
}
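
// For reference, an illustrative (not compiled) scalar recurrence that the
// vector code above is mathematically equivalent to: each integral-image
// sample is the pixel (or its square) plus the samples to its left and above,
// minus the sample above-left. The vector code reaches the same result
// through per-row prefix scans (scan_32) plus the H - D correction.
#if 0
static void integral_images_scalar(const uint8_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  for (int j = 0; j <= width; ++j) A[j] = B[j] = 0;
  for (int i = 1; i <= height; ++i) {
    A[i * buf_stride] = B[i * buf_stride] = 0;
    for (int j = 1; j <= width; ++j) {
      const int32_t x = src[(i - 1) * src_stride + (j - 1)];
      B[i * buf_stride + j] = x + B[i * buf_stride + j - 1] +
                              B[(i - 1) * buf_stride + j] -
                              B[(i - 1) * buf_stride + j - 1];
      A[i * buf_stride + j] = x * x + A[i * buf_stride + j - 1] +
                              A[(i - 1) * buf_stride + j] -
                              A[(i - 1) * buf_stride + j - 1];
    }
  }
}
#endif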

// Compute two integral images from src. B sums elements; A sums their squares
//
// A and B should be aligned to 32 bytes. buf_stride should be a multiple of 8.
static void integral_images_highbd(const uint16_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  const __m256i zero = _mm256_setzero_si256();
  // Write out the zero top row
  memset_zero_avx(A, &zero, (width + 8));
  memset_zero_avx(B, &zero, (width + 8));

  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the eight lanes.
    __m256i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 8) {
      const int ABj = 1 + j;

      const __m256i above1 = yy_load_256(B + ABj + i * buf_stride);
      const __m256i above2 = yy_load_256(A + ABj + i * buf_stride);

      const __m256i x1 = yy256_load_extend_16_32(src + j + i * src_stride);
      const __m256i x2 = _mm256_madd_epi16(x1, x1);

      const __m256i sc1 = scan_32(x1);
      const __m256i sc2 = scan_32(x2);

      const __m256i row1 =
          _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1);
      const __m256i row2 =
          _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2);

      yy_store_256(B + ABj + (i + 1) * buf_stride, row1);
      yy_store_256(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm256_set1_epi32(
          _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));
      ldiff2 = _mm256_set1_epi32(
          _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));
    }
  }
}

// Compute 8 values of boxsum from the given integral image. ii should point
// at the middle of the box (for the first value). r is the box radius.
static INLINE __m256i boxsum_from_ii(const int32_t *ii, int stride, int r) {
  const __m256i tl = yy_loadu_256(ii - (r + 1) - (r + 1) * stride);
  const __m256i tr = yy_loadu_256(ii + (r + 0) - (r + 1) * stride);
  const __m256i bl = yy_loadu_256(ii - (r + 1) + r * stride);
  const __m256i br = yy_loadu_256(ii + (r + 0) + r * stride);
  const __m256i u = _mm256_sub_epi32(tr, tl);
  const __m256i v = _mm256_sub_epi32(br, bl);
  return _mm256_sub_epi32(v, u);
}
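
// Illustrative scalar identity behind boxsum_from_ii (not part of the build):
// with integral image ii, the sum over the (2r+1)x(2r+1) box centred on a
// sample is recovered from the four corner samples of the image.
#if 0
static int32_t boxsum_from_ii_scalar(const int32_t *ii, int stride, int r) {
  const int32_t tl = ii[-(r + 1) - (r + 1) * stride];
  const int32_t tr = ii[(r + 0) - (r + 1) * stride];
  const int32_t bl = ii[-(r + 1) + r * stride];
  const int32_t br = ii[(r + 0) + r * stride];
  return br - bl - tr + tl;
}
#endif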

static __m256i round_for_shift(unsigned shift) {
  return _mm256_set1_epi32((1 << shift) >> 1);
}

static __m256i compute_p(__m256i sum1, __m256i sum2, int bit_depth, int n) {
  __m256i an, bb;
  if (bit_depth > 8) {
    const __m256i rounding_a = round_for_shift(2 * (bit_depth - 8));
    const __m256i rounding_b = round_for_shift(bit_depth - 8);
    const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
    const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
    const __m256i a =
        _mm256_srl_epi32(_mm256_add_epi32(sum2, rounding_a), shift_a);
    const __m256i b =
        _mm256_srl_epi32(_mm256_add_epi32(sum1, rounding_b), shift_b);
    // b < 2^14, so we can use a 16-bit madd rather than a 32-bit
    // mullo to square it
    bb = _mm256_madd_epi16(b, b);
    an = _mm256_max_epi32(_mm256_mullo_epi32(a, _mm256_set1_epi32(n)), bb);
  } else {
    bb = _mm256_madd_epi16(sum1, sum1);
    an = _mm256_mullo_epi32(sum2, _mm256_set1_epi32(n));
  }
  return _mm256_sub_epi32(an, bb);
}
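
// Illustrative scalar version of compute_p (not part of the build): p is
// n times the sum of squares minus the square of the sum over the box, i.e.
// n^2 times the box variance. For bit depths above 8 the sums are first
// rounded down towards an 8-bit range so that the multiplications fit in
// 32 bits.
#if 0
static int32_t compute_p_scalar(int32_t sum1, int32_t sum2, int bit_depth,
                                int n) {
  if (bit_depth > 8) {
    const int32_t b = ROUND_POWER_OF_TWO(sum1, bit_depth - 8);
    const int32_t a = ROUND_POWER_OF_TWO(sum2, 2 * (bit_depth - 8));
    return AOMMAX(a * n, b * b) - b * b;
  }
  return sum2 * n - sum1 * sum1;
}
#endif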

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D,
                    int width, int height, int buf_stride, int bit_depth,
                    int sgr_params_idx, int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m256i s = _mm256_set1_epi32(params->s[radius_idx]);
  // one_over_n[n-1] is 2^12/n, so easily fits in an int16
  const __m256i one_over_n = _mm256_set1_epi32(one_by_x[n - 1]);

  const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff);
  __m256i mask[8];
  for (int idx = 0; idx < 8; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx));
    mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; ++i) {
    for (int j = -1; j < width + 1; j += 8) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(8, width + 1 - j);
      assert(idx >= 1);

      if (idx < 8) {
        sum1 = _mm256_and_si256(mask[idx], sum1);
        sum2 = _mm256_and_si256(mask[idx], sum2);
      }

      const __m256i p = compute_p(sum1, sum2, bit_depth, n);

      const __m256i z = _mm256_min_epi32(
          _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z),
                            SGRPROJ_MTABLE_BITS),
          _mm256_set1_epi32(255));

      const __m256i a_res = _mm256_i32gather_epi32(x_by_xplus1, z, 4);

      yy_storeu_256(A + i * buf_stride + j, a_res);

      const __m256i a_complement =
          _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both less than 256, so we can multiply them first.
      const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n);
      const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1);
      const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res),
                                              SGRPROJ_RECIP_BITS);

      yy_storeu_256(B + i * buf_stride + j, b_res);
    }
  }
}
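
// Illustrative scalar form of the per-pixel work in calc_ab (not part of the
// build), written with the scalar helper sketches earlier in this file.
#if 0
static void calc_ab_one_pixel(int32_t *A, int32_t *B, const int32_t *C,
                              const int32_t *D, int off, int buf_stride,
                              int bit_depth, int r, int n, int s) {
  const int32_t sum1 = boxsum_from_ii_scalar(D + off, buf_stride, r);
  const int32_t sum2 = boxsum_from_ii_scalar(C + off, buf_stride, r);
  const int32_t p = compute_p_scalar(sum1, sum2, bit_depth, n);
  const uint32_t z =
      AOMMIN(ROUND_POWER_OF_TWO(p * s, SGRPROJ_MTABLE_BITS), 255);
  const int32_t a = x_by_xplus1[z];
  A[off] = a;
  B[off] = ROUND_POWER_OF_TWO((SGRPROJ_SGR - a) * one_by_x[n - 1] * sum1,
                              SGRPROJ_RECIP_BITS);
}
#endif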

// Calculate 8 values of the "cross sum" starting at buf. This is a 3x3 filter
// where the outer four corners have weight 3 and all other pixels have weight
// 4.
//
// Pixels are indexed as follows:
// xtl  xt   xtr
// xl    x   xr
// xbl  xb   xbr
//
// buf points to x
//
// fours = xl + xt + xr + xb + x
// threes = xtl + xtr + xbr + xbl
// cross_sum = 4 * fours + 3 * threes
//           = 4 * (fours + threes) - threes
//           = (fours + threes) << 2 - threes
static INLINE __m256i cross_sum(const int32_t *buf, int stride) {
  const __m256i xtl = yy_loadu_256(buf - 1 - stride);
  const __m256i xt = yy_loadu_256(buf - stride);
  const __m256i xtr = yy_loadu_256(buf + 1 - stride);
  const __m256i xl = yy_loadu_256(buf - 1);
  const __m256i x = yy_loadu_256(buf);
  const __m256i xr = yy_loadu_256(buf + 1);
  const __m256i xbl = yy_loadu_256(buf - 1 + stride);
  const __m256i xb = yy_loadu_256(buf + stride);
  const __m256i xbr = yy_loadu_256(buf + 1 + stride);

  const __m256i fours = _mm256_add_epi32(
      xl, _mm256_add_epi32(xt, _mm256_add_epi32(xr, _mm256_add_epi32(xb, x))));
  const __m256i threes =
      _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));

  return _mm256_sub_epi32(_mm256_slli_epi32(_mm256_add_epi32(fours, threes), 2),
                          threes);
}
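
// Illustrative scalar equivalent of cross_sum for a single output sample
// (not part of the build): weight 4 on the centre and edge-adjacent samples,
// weight 3 on the corners.
#if 0
static int32_t cross_sum_scalar(const int32_t *buf, int stride) {
  const int32_t fours =
      buf[-1] + buf[-stride] + buf[1] + buf[stride] + buf[0];
  const int32_t threes = buf[-1 - stride] + buf[1 - stride] +
                         buf[1 + stride] + buf[-1 + stride];
  return 4 * fours + 3 * threes;
}
#endif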

// The final filter for self-guided restoration. Computes a weighted average
// across A, B with "cross sums" (see cross_sum implementation above).
static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
                         const int32_t *B, int buf_stride, const void *dgd8,
                         int dgd_stride, int width, int height, int highbd) {
  const int nb = 5;
  const __m256i rounding =
      round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; j += 8) {
      const __m256i a = cross_sum(A + i * buf_stride + j, buf_stride);
      const __m256i b = cross_sum(B + i * buf_stride + j, buf_stride);

      const __m128i raw =
          xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
      const __m256i src =
          highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);

      __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
      __m256i w = _mm256_srai_epi32(_mm256_add_epi32(v, rounding),
                                    SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);

      yy_storeu_256(dst + i * dst_stride + j, w);
    }
  }
}
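
// Illustrative scalar form of one final_filter output sample (not part of
// the build). The cross_sum weights add up to 1 << nb (4 * 5 + 3 * 4 = 32),
// which is why nb appears in the shift.
#if 0
static int32_t final_filter_one_pixel(const int32_t *A, const int32_t *B,
                                      int buf_stride, int32_t src_pixel) {
  const int nb = 5;
  const int32_t a = cross_sum_scalar(A, buf_stride);
  const int32_t b = cross_sum_scalar(B, buf_stride);
  const int32_t v = a * src_pixel + b;
  return ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
}
#endif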

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C,
                         const int32_t *D, int width, int height,
                         int buf_stride, int bit_depth, int sgr_params_idx,
                         int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m256i s = _mm256_set1_epi32(params->s[radius_idx]);
  // one_over_n[n-1] is 2^12/n, so easily fits in an int16
  const __m256i one_over_n = _mm256_set1_epi32(one_by_x[n - 1]);

  const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff);
  __m256i mask[8];
  for (int idx = 0; idx < 8; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx));
    mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; i += 2) {
    for (int j = -1; j < width + 1; j += 8) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(8, width + 1 - j);
      assert(idx >= 1);

      if (idx < 8) {
        sum1 = _mm256_and_si256(mask[idx], sum1);
        sum2 = _mm256_and_si256(mask[idx], sum2);
      }

      const __m256i p = compute_p(sum1, sum2, bit_depth, n);

      const __m256i z = _mm256_min_epi32(
          _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z),
                            SGRPROJ_MTABLE_BITS),
          _mm256_set1_epi32(255));

      const __m256i a_res = _mm256_i32gather_epi32(x_by_xplus1, z, 4);

      yy_storeu_256(A + i * buf_stride + j, a_res);

      const __m256i a_complement =
          _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both less than 256, so we can multiply them first.
      const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n);
      const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1);
      const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res),
                                              SGRPROJ_RECIP_BITS);

      yy_storeu_256(B + i * buf_stride + j, b_res);
    }
  }
}

// Calculate 8 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xtl  xt   xtr
//  -   buf   -
// xbl  xb   xbr
//
// Pixels are weighted like this:
//  5    6    5
//  0    0    0
//  5    6    5
//
// fives = xtl + xtr + xbl + xbr
// sixes = xt + xb
// cross_sum = 6 * sixes + 5 * fives
//           = 5 * (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
static INLINE __m256i cross_sum_fast_even_row(const int32_t *buf, int stride) {
  const __m256i xtl = yy_loadu_256(buf - 1 - stride);
  const __m256i xt = yy_loadu_256(buf - stride);
  const __m256i xtr = yy_loadu_256(buf + 1 - stride);
  const __m256i xbl = yy_loadu_256(buf - 1 + stride);
  const __m256i xb = yy_loadu_256(buf + stride);
  const __m256i xbr = yy_loadu_256(buf + 1 + stride);

  const __m256i fives =
      _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));
  const __m256i sixes = _mm256_add_epi32(xt, xb);
  const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);

  return _mm256_add_epi32(
      _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
                       fives_plus_sixes),
      sixes);
}

// Calculate 8 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xl    x   xr
//
// Pixels are weighted like this:
//  5    6    5
//
// buf points to x
//
// fives = xl + xr
// sixes = x
// cross_sum = 5 * fives + 6 * sixes
//           = 4 * (fives + sixes) + (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
static INLINE __m256i cross_sum_fast_odd_row(const int32_t *buf) {
  const __m256i xl = yy_loadu_256(buf - 1);
  const __m256i x = yy_loadu_256(buf);
  const __m256i xr = yy_loadu_256(buf + 1);

  const __m256i fives = _mm256_add_epi32(xl, xr);
  const __m256i sixes = x;

  const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);

  return _mm256_add_epi32(
      _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
                       fives_plus_sixes),
      sixes);
}

// The final filter for the self-guided restoration. Computes a
// weighted average across A, B with "cross sums" (see cross_sum_...
// implementations above).
static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
                              const int32_t *B, int buf_stride,
                              const void *dgd8, int dgd_stride, int width,
                              int height, int highbd) {
  const int nb0 = 5;
  const int nb1 = 4;

  const __m256i rounding0 =
      round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
  const __m256i rounding1 =
      round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    if (!(i & 1)) {  // even row
      for (int j = 0; j < width; j += 8) {
        const __m256i a =
            cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
        const __m256i b =
            cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);

        const __m128i raw =
            xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m256i src =
            highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);

        __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
        __m256i w =
            _mm256_srai_epi32(_mm256_add_epi32(v, rounding0),
                              SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);

        yy_storeu_256(dst + i * dst_stride + j, w);
      }
    } else {  // odd row
      for (int j = 0; j < width; j += 8) {
        const __m256i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
        const __m256i b = cross_sum_fast_odd_row(B + i * buf_stride + j);

        const __m128i raw =
            xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m256i src =
            highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw);

        __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b);
        __m256i w =
            _mm256_srai_epi32(_mm256_add_epi32(v, rounding1),
                              SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

        yy_storeu_256(dst + i * dst_stride + j, w);
      }
    }
  }
}

int av1_selfguided_restoration_avx2(const uint8_t *dgd8, int width, int height,
                                    int dgd_stride, int32_t *flt0,
                                    int32_t *flt1, int flt_stride,
                                    int sgr_params_idx, int bit_depth,
                                    int highbd) {
  // The ALIGN_POWER_OF_TWO macro here ensures that column 1 of Atl, Btl,
  // Ctl and Dtl is 32-byte aligned.
  const int buf_elts = ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3);

  int32_t *buf = aom_memalign(
      32, 4 * sizeof(*buf) * ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3));
  if (!buf) return -1;

  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

  // Adjusting the stride of A and B here appears to avoid bad cache effects,
  // leading to a significant speed improvement.
  // We also align the stride to a multiple of 32 bytes for efficiency.
  int buf_stride = ALIGN_POWER_OF_TWO(width_ext + 16, 3);

  // The "tl" pointers point at the top-left of the initialised data for the
  // array.
  int32_t *Atl = buf + 0 * buf_elts + 7;
  int32_t *Btl = buf + 1 * buf_elts + 7;
  int32_t *Ctl = buf + 2 * buf_elts + 7;
  int32_t *Dtl = buf + 3 * buf_elts + 7;

  // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note
  // there's a zero row and column in A, B (integral images), so we move down
  // and right one for them.
  const int buf_diag_border =
      SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT;

  int32_t *A0 = Atl + 1 + buf_stride;
  int32_t *B0 = Btl + 1 + buf_stride;
  int32_t *C0 = Ctl + 1 + buf_stride;
  int32_t *D0 = Dtl + 1 + buf_stride;

  // Finally, A, B, C, D point at position (0, 0).
  int32_t *A = A0 + buf_diag_border;
  int32_t *B = B0 + buf_diag_border;
  int32_t *C = C0 + buf_diag_border;
  int32_t *D = D0 + buf_diag_border;

  const int dgd_diag_border =
      SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT;
  const uint8_t *dgd0 = dgd8 - dgd_diag_border;

  // Generate integral images from the input. C will contain sums of squares; D
  // will contain just sums
  if (highbd)
    integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext,
                           height_ext, Ctl, Dtl, buf_stride);
  else
    integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl,
                    buf_stride);

  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  // Write to flt0 and flt1
  // If params->r == 0 we skip the corresponding filter. We only allow one of
  // the radii to be 0, as having both equal to 0 would be equivalent to
  // skipping SGR entirely.
  assert(!(params->r[0] == 0 && params->r[1] == 0));
  assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
  assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));

  if (params->r[0] > 0) {
    calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth,
                 sgr_params_idx, 0);
    final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride,
                      width, height, highbd);
  }

  if (params->r[1] > 0) {
    calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx,
            1);
    final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
                 height, highbd);
  }
  aom_free(buf);
  return 0;
}
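
// Minimal usage sketch for av1_selfguided_restoration_avx2 (not part of the
// build). Names and buffer sizes here are illustrative only; the function
// assumes dgd8 has SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels of valid
// data around the block, as described above.
#if 0
static int run_sgr_example(const uint8_t *dgd8, int width, int height,
                           int dgd_stride, int sgr_params_idx, int bit_depth) {
  int32_t *flt0 = aom_malloc(sizeof(*flt0) * width * height);
  int32_t *flt1 = aom_malloc(sizeof(*flt1) * width * height);
  if (!flt0 || !flt1) {
    aom_free(flt0);
    aom_free(flt1);
    return -1;
  }
  const int ret = av1_selfguided_restoration_avx2(
      dgd8, width, height, dgd_stride, flt0, flt1, /*flt_stride=*/width,
      sgr_params_idx, bit_depth, /*highbd=*/0);
  // ... combine flt0/flt1 with the source, e.g. as in
  // apply_selfguided_restoration_avx2 below ...
  aom_free(flt0);
  aom_free(flt1);
  return ret;
}
#endif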

void apply_selfguided_restoration_avx2(const uint8_t *dat8, int width,
                                       int height, int stride, int eps,
                                       const int *xqd, uint8_t *dst8,
                                       int dst_stride, int32_t *tmpbuf,
                                       int bit_depth, int highbd) {
  int32_t *flt0 = tmpbuf;
  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
  assert(width * height <= RESTORATION_UNITPELS_MAX);
  const int ret = av1_selfguided_restoration_avx2(
      dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd);
  (void)ret;
  assert(!ret);
  const sgr_params_type *const params = &sgr_params[eps];
  int xq[2];
  decode_xq(xqd, xq, params);

  __m256i xq0 = _mm256_set1_epi32(xq[0]);
  __m256i xq1 = _mm256_set1_epi32(xq[1]);

  for (int i = 0; i < height; ++i) {
    // Calculate output in batches of 16 pixels
    for (int j = 0; j < width; j += 16) {
      const int k = i * width + j;
      const int m = i * dst_stride + j;

      const uint8_t *dat8ij = dat8 + i * stride + j;
      __m256i ep_0, ep_1;
      __m128i src_0, src_1;
      if (highbd) {
        src_0 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
        src_1 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij + 8));
        ep_0 = _mm256_cvtepu16_epi32(src_0);
        ep_1 = _mm256_cvtepu16_epi32(src_1);
      } else {
        src_0 = xx_loadu_128(dat8ij);
        ep_0 = _mm256_cvtepu8_epi32(src_0);
        ep_1 = _mm256_cvtepu8_epi32(_mm_srli_si128(src_0, 8));
      }

      const __m256i u_0 = _mm256_slli_epi32(ep_0, SGRPROJ_RST_BITS);
      const __m256i u_1 = _mm256_slli_epi32(ep_1, SGRPROJ_RST_BITS);

      __m256i v_0 = _mm256_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
      __m256i v_1 = _mm256_slli_epi32(u_1, SGRPROJ_PRJ_BITS);

      if (params->r[0] > 0) {
        const __m256i f1_0 = _mm256_sub_epi32(yy_loadu_256(&flt0[k]), u_0);
        v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq0, f1_0));

        const __m256i f1_1 = _mm256_sub_epi32(yy_loadu_256(&flt0[k + 8]), u_1);
        v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq0, f1_1));
      }

      if (params->r[1] > 0) {
        const __m256i f2_0 = _mm256_sub_epi32(yy_loadu_256(&flt1[k]), u_0);
        v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq1, f2_0));

        const __m256i f2_1 = _mm256_sub_epi32(yy_loadu_256(&flt1[k + 8]), u_1);
        v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq1, f2_1));
      }

      const __m256i rounding =
          round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m256i w_0 = _mm256_srai_epi32(
          _mm256_add_epi32(v_0, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m256i w_1 = _mm256_srai_epi32(
          _mm256_add_epi32(v_1, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);

      if (highbd) {
        // Pack into 16 bits and clamp to [0, 2^bit_depth)
        // Note that packing into 16 bits messes up the order of the words,
        // so we use a permute function to correct this
        const __m256i tmp = _mm256_packus_epi32(w_0, w_1);
        const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8);
        const __m256i max = _mm256_set1_epi16((1 << bit_depth) - 1);
        const __m256i res = _mm256_min_epi16(tmp2, max);
        yy_storeu_256(CONVERT_TO_SHORTPTR(dst8 + m), res);
      } else {
        // Pack into 8 bits and clamp to [0, 256)
        // Note that each pack messes up the order of the pixels,
        // so we use a permute function to correct this
        const __m256i tmp = _mm256_packs_epi32(w_0, w_1);
        const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8);
        const __m256i res =
            _mm256_packus_epi16(tmp2, tmp2 /* "don't care" value */);
        const __m128i res2 =
            _mm256_castsi256_si128(_mm256_permute4x64_epi64(res, 0xd8));
        xx_storeu_128(dst8 + m, res2);
      }
    }
  }
}
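
// Minimal usage sketch for apply_selfguided_restoration_avx2 (not part of
// the build). tmpbuf must hold the two intermediate planes of
// RESTORATION_UNITPELS_MAX pixels each, as used above; the wrapper name here
// is illustrative only.
#if 0
static void run_apply_sgr_example(const uint8_t *dat8, int width, int height,
                                  int stride, int eps, const int *xqd,
                                  uint8_t *dst8, int dst_stride,
                                  int bit_depth) {
  int32_t *tmpbuf =
      aom_memalign(32, 2 * RESTORATION_UNITPELS_MAX * sizeof(*tmpbuf));
  if (!tmpbuf) return;
  apply_selfguided_restoration_avx2(dat8, width, height, stride, eps, xqd,
                                    dst8, dst_stride, tmpbuf, bit_depth,
                                    /*highbd=*/0);
  aom_free(tmpbuf);
}
#endif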