/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <smmintrin.h>
#include <string.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/restoration.h"
#include "aom_dsp/x86/synonyms.h"

// Load 4 bytes from the possibly-misaligned pointer p, extend each byte to
// 32-bit precision and return them in an SSE register.
static __m128i xx_load_extend_8_32(const void *p) {
  return _mm_cvtepu8_epi32(xx_loadl_32(p));
}

// Load 4 halfwords from the possibly-misaligned pointer p, extend each
// halfword to 32-bit precision and return them in an SSE register.
static __m128i xx_load_extend_16_32(const void *p) {
  return _mm_cvtepu16_epi32(xx_loadl_64(p));
}
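
// For illustration (an editorial note, not part of the upstream file): given
// the bytes { 1, 2, 3, 4 } at p, xx_load_extend_8_32(p) returns the 32-bit
// lanes { 1, 2, 3, 4 }; xx_load_extend_16_32 does the same for four uint16s.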

// Compute the scan of an SSE register holding 4 32-bit integers. If the
// register holds x0..x3 then the scan will hold x0, x0+x1, x0+x1+x2,
// x0+x1+x2+x3
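//
// For example (illustration only), scan_32({1, 2, 3, 4}) proceeds as
// {1, 2, 3, 4} + {0, 1, 2, 3} = {1, 3, 5, 7}, then
// {1, 3, 5, 7} + {0, 0, 1, 3} = {1, 3, 6, 10}.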
static __m128i scan_32(__m128i x) {
  const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4));
  return _mm_add_epi32(x01, _mm_slli_si128(x01, 8));
}

// Compute two integral images from src. B sums elements; A sums their
// squares. Each integral image is offset by one pixel from src, so has
// dimensions (width + 1) x (height + 1), with the first row and column
// all zero.
//
// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple
// of 4.
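//
// The scalar recurrence being vectorised here is
//   out(i+1, j+1) = src(i, j) + out(i, j+1) + out(i+1, j) - out(i, j),
// evaluated four columns at a time: scan_32 supplies the horizontal prefix
// sums, the row above supplies the out(i, j+1) term, and ldiff carries the
// out(i+1, j) - out(i, j) term across loop iterations.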
static void integral_images(const uint8_t *src, int src_stride, int width,
                            int height, int32_t *A, int32_t *B,
                            int buf_stride) {
  // Write out the zero top row
  memset(A, 0, sizeof(*A) * (width + 1));
  memset(B, 0, sizeof(*B) * (width + 1));

  const __m128i zero = _mm_setzero_si128();
  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the four lanes.
    __m128i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 4) {
      const int ABj = 1 + j;

      const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
      const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);

      const __m128i x1 = xx_load_extend_8_32(src + j + i * src_stride);
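      // Each 32-bit lane of x1 holds a byte-sized value, so the 16-bit madd
      // below computes x * x + 0 * 0 per lane, i.e. the square of each
      // sample.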
      const __m128i x2 = _mm_madd_epi16(x1, x1);

      const __m128i sc1 = scan_32(x1);
      const __m128i sc2 = scan_32(x2);

      const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
      const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);

      xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
      xx_store_128(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
      ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
    }
  }
}

// Compute two integral images from src. B sums elements; A sums their squares
//
// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple
// of 4.
static void integral_images_highbd(const uint16_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  // Write out the zero top row
  memset(A, 0, sizeof(*A) * (width + 1));
  memset(B, 0, sizeof(*B) * (width + 1));

  const __m128i zero = _mm_setzero_si128();
  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the four lanes.
    __m128i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 4) {
      const int ABj = 1 + j;

      const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
      const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);

      const __m128i x1 = xx_load_extend_16_32(src + j + i * src_stride);
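      // Input samples are at most 12 bits deep in AV1, so each 32-bit lane of
      // x1 fits in a signed 16-bit value and the madd below again yields the
      // per-lane squares.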
      const __m128i x2 = _mm_madd_epi16(x1, x1);

      const __m128i sc1 = scan_32(x1);
      const __m128i sc2 = scan_32(x2);

      const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
      const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);

      xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
      xx_store_128(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
      ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
    }
  }
}

// Compute 4 values of boxsum from the given integral image. ii should point
// at the middle of the box (for the first value). r is the box radius.
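//
// With an integral image II this is the standard four-corner identity: the
// sum over a (2r+1) x (2r+1) box is II(br) - II(bl) - II(tr) + II(tl), which
// the subtractions below compute as (br - bl) - (tr - tl). The r + 1 offsets
// account for the integral image's extra zero row and column.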
static INLINE __m128i boxsum_from_ii(const int32_t *ii, int stride, int r) {
  const __m128i tl = xx_loadu_128(ii - (r + 1) - (r + 1) * stride);
  const __m128i tr = xx_loadu_128(ii + (r + 0) - (r + 1) * stride);
  const __m128i bl = xx_loadu_128(ii - (r + 1) + r * stride);
  const __m128i br = xx_loadu_128(ii + (r + 0) + r * stride);
  const __m128i u = _mm_sub_epi32(tr, tl);
  const __m128i v = _mm_sub_epi32(br, bl);
  return _mm_sub_epi32(v, u);
}

static __m128i round_for_shift(unsigned shift) {
  return _mm_set1_epi32((1 << shift) >> 1);
}

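// Compute p = n * sum2 - sum1 * sum1, i.e. (up to rounding) n^2 times the
// variance of the box (sum1 and sum2 are the box sums of the pixels and of
// their squares). For bit depths above 8 the sums are first rounded to
// roughly 8-bit precision so that the intermediate products fit in 32 bits;
// the _mm_max_epi32 then keeps p non-negative despite the rounding.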
static __m128i compute_p(__m128i sum1, __m128i sum2, int bit_depth, int n) {
  __m128i an, bb;
  if (bit_depth > 8) {
    const __m128i rounding_a = round_for_shift(2 * (bit_depth - 8));
    const __m128i rounding_b = round_for_shift(bit_depth - 8);
    const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
    const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
    const __m128i a = _mm_srl_epi32(_mm_add_epi32(sum2, rounding_a), shift_a);
    const __m128i b = _mm_srl_epi32(_mm_add_epi32(sum1, rounding_b), shift_b);
    // b < 2^14, so we can use a 16-bit madd rather than a 32-bit
    // mullo to square it
    bb = _mm_madd_epi16(b, b);
    an = _mm_max_epi32(_mm_mullo_epi32(a, _mm_set1_epi32(n)), bb);
  } else {
    bb = _mm_madd_epi16(sum1, sum1);
    an = _mm_mullo_epi32(sum2, _mm_set1_epi32(n));
  }
  return _mm_sub_epi32(an, bb);
}

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D,
                    int width, int height, int buf_stride, int bit_depth,
                    int sgr_params_idx, int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
  // one_by_x[n-1] is 2^12/n, so easily fits in an int16
  const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]);

  const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff);
  __m128i mask[4];
  for (int idx = 0; idx < 4; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
    mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; ++i) {
    for (int j = -1; j < width + 1; j += 4) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(4, width + 1 - j);
      assert(idx >= 1);

      if (idx < 4) {
        sum1 = _mm_and_si128(mask[idx], sum1);
        sum2 = _mm_and_si128(mask[idx], sum2);
      }

      const __m128i p = compute_p(sum1, sum2, bit_depth, n);

      const __m128i z = _mm_min_epi32(
          _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
                         SGRPROJ_MTABLE_BITS),
          _mm_set1_epi32(255));

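      // a_res approximates (1 << SGRPROJ_SGR_BITS) * z / (z + 1). In flat
      // areas the variance, and hence z, is small, so a_res is small and the
      // final filter leans mostly on the windowed mean via b_res below:
      // exactly the behaviour wanted from a guided filter.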
      // 'Gather' type instructions are not available pre-AVX2, so synthesize a
      // gather using scalar loads.
      const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
                                          x_by_xplus1[_mm_extract_epi32(z, 2)],
                                          x_by_xplus1[_mm_extract_epi32(z, 1)],
                                          x_by_xplus1[_mm_extract_epi32(z, 0)]);

      xx_storeu_128(A + i * buf_stride + j, a_res);

      const __m128i a_complement =
          _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both comfortably below 2^15 (at most 2^8 and 2^12 respectively),
      // so we can multiply them first.
      const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
      const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
      const __m128i b_res =
          _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);

      xx_storeu_128(B + i * buf_stride + j, b_res);
    }
  }
}

// Calculate 4 values of the "cross sum" starting at buf. This is a 3x3 filter
// where the outer four corners have weight 3 and all other pixels have weight
// 4.
//
// Pixels are indexed like this:
// xtl  xt   xtr
// xl    x   xr
// xbl  xb   xbr
//
// buf points to x
//
// fours = xl + xt + xr + xb + x
// threes = xtl + xtr + xbr + xbl
// cross_sum = 4 * fours + 3 * threes
//           = 4 * (fours + threes) - threes
//           = (fours + threes) << 2 - threes
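//
// The weights sum to 5 * 4 + 4 * 3 = 32 = 2^5, which is where the nb = 5
// normalisation shift in final_filter below comes from.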
static INLINE __m128i cross_sum(const int32_t *buf, int stride) {
  const __m128i xtl = xx_loadu_128(buf - 1 - stride);
  const __m128i xt = xx_loadu_128(buf - stride);
  const __m128i xtr = xx_loadu_128(buf + 1 - stride);
  const __m128i xl = xx_loadu_128(buf - 1);
  const __m128i x = xx_loadu_128(buf);
  const __m128i xr = xx_loadu_128(buf + 1);
  const __m128i xbl = xx_loadu_128(buf - 1 + stride);
  const __m128i xb = xx_loadu_128(buf + stride);
  const __m128i xbr = xx_loadu_128(buf + 1 + stride);

  const __m128i fours = _mm_add_epi32(
      xl, _mm_add_epi32(xt, _mm_add_epi32(xr, _mm_add_epi32(xb, x))));
  const __m128i threes =
      _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));

  return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes);
}

// The final filter for self-guided restoration. Computes a weighted average
// across A, B with "cross sums" (see cross_sum implementation above).
static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
                         const int32_t *B, int buf_stride, const void *dgd8,
                         int dgd_stride, int width, int height, int highbd) {
  const int nb = 5;
  const __m128i rounding =
      round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; j += 4) {
      const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride);
      const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride);
      const __m128i raw =
          xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
      const __m128i src =
          highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

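      // a is at most 32 * SGRPROJ_SGR (= 2^13 with the current
      // SGRPROJ_SGR_BITS of 8) and src at most 2^12 - 1, so both fit in the
      // low 16 bits of their lanes and the madd below computes a * src per
      // 32-bit lane.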
      __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
      __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding),
                                 SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);

      xx_storeu_128(dst + i * dst_stride + j, w);
    }
  }
}

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
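//
// This is the reduced-complexity variant: note the i += 2 below, which
// computes A and B on every second row only. final_filter_fast compensates
// by using different cross sums on even and odd rows.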
static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C,
                         const int32_t *D, int width, int height,
                         int buf_stride, int bit_depth, int sgr_params_idx,
                         int radius_idx) {
  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
  // one_by_x[n-1] is 2^12/n, so easily fits in an int16
  const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]);

  const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff);
  __m128i mask[4];
  for (int idx = 0; idx < 4; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
    mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; i += 2) {
    for (int j = -1; j < width + 1; j += 4) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(4, width + 1 - j);
      assert(idx >= 1);

      if (idx < 4) {
        sum1 = _mm_and_si128(mask[idx], sum1);
        sum2 = _mm_and_si128(mask[idx], sum2);
      }

      const __m128i p = compute_p(sum1, sum2, bit_depth, n);

      const __m128i z = _mm_min_epi32(
          _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
                         SGRPROJ_MTABLE_BITS),
          _mm_set1_epi32(255));

      // 'Gather' type instructions are not available pre-AVX2, so synthesize a
      // gather using scalar loads.
      const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
                                          x_by_xplus1[_mm_extract_epi32(z, 2)],
                                          x_by_xplus1[_mm_extract_epi32(z, 1)],
                                          x_by_xplus1[_mm_extract_epi32(z, 0)]);

      xx_storeu_128(A + i * buf_stride + j, a_res);

      const __m128i a_complement =
          _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both comfortably below 2^15 (at most 2^8 and 2^12 respectively),
      // so we can multiply them first.
      const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
      const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
      const __m128i b_res =
          _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);

      xx_storeu_128(B + i * buf_stride + j, b_res);
    }
  }
}

// Calculate 4 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xtl  xt   xtr
//  -   buf   -
// xbl  xb   xbr
//
// Pixels are weighted like this:
//  5    6    5
//  0    0    0
//  5    6    5
//
// fives = xtl + xtr + xbl + xbr
// sixes = xt + xb
// cross_sum = 5 * fives + 6 * sixes
//           = 5 * (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
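//
// The weights sum to 4 * 5 + 2 * 6 = 32 = 2^5, matching the nb0 = 5
// normalisation shift in final_filter_fast below.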
static INLINE __m128i cross_sum_fast_even_row(const int32_t *buf, int stride) {
  const __m128i xtl = xx_loadu_128(buf - 1 - stride);
  const __m128i xt = xx_loadu_128(buf - stride);
  const __m128i xtr = xx_loadu_128(buf + 1 - stride);
  const __m128i xbl = xx_loadu_128(buf - 1 + stride);
  const __m128i xb = xx_loadu_128(buf + stride);
  const __m128i xbr = xx_loadu_128(buf + 1 + stride);

  const __m128i fives =
      _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));
  const __m128i sixes = _mm_add_epi32(xt, xb);
  const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);

  return _mm_add_epi32(
      _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
      sixes);
}

// Calculate 4 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xl    x   xr
//
// Pixels are weighted like this:
//  5    6    5
//
// buf points to x
//
// fives = xl + xr
// sixes = x
// cross_sum = 5 * fives + 6 * sixes
//           = 4 * (fives + sixes) + (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
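//
// Here the weights sum to 2 * 5 + 6 = 16 = 2^4, matching the nb1 = 4 shift
// used for odd rows in final_filter_fast.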
static INLINE __m128i cross_sum_fast_odd_row(const int32_t *buf) {
  const __m128i xl = xx_loadu_128(buf - 1);
  const __m128i x = xx_loadu_128(buf);
  const __m128i xr = xx_loadu_128(buf + 1);

  const __m128i fives = _mm_add_epi32(xl, xr);
  const __m128i sixes = x;

  const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);

  return _mm_add_epi32(
      _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
      sixes);
}

// The final filter for the self-guided restoration. Computes a
// weighted average across A, B with "cross sums" (see cross_sum_...
// implementations above).
static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
                              const int32_t *B, int buf_stride,
                              const void *dgd8, int dgd_stride, int width,
                              int height, int highbd) {
  const int nb0 = 5;
  const int nb1 = 4;

  const __m128i rounding0 =
      round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
  const __m128i rounding1 =
      round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    if (!(i & 1)) {  // even row
      for (int j = 0; j < width; j += 4) {
        const __m128i a =
            cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
        const __m128i b =
            cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);
        const __m128i raw =
            xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m128i src =
            highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

        __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
        __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0),
                                   SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);

        xx_storeu_128(dst + i * dst_stride + j, w);
      }
    } else {  // odd row
      for (int j = 0; j < width; j += 4) {
        const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
        const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j);
        const __m128i raw =
            xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m128i src =
            highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

        __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
        __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1),
                                   SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

        xx_storeu_128(dst + i * dst_stride + j, w);
      }
    }
  }
}

int av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
                                      int height, int dgd_stride, int32_t *flt0,
                                      int32_t *flt1, int flt_stride,
                                      int sgr_params_idx, int bit_depth,
                                      int highbd) {
  int32_t *buf = (int32_t *)aom_memalign(
      16, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);
  if (!buf) return -1;
  memset(buf, 0, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);

  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

  // Adjusting the stride of A and B here appears to avoid bad cache effects,
  // leading to a significant speed improvement.
  // We also align the stride to a multiple of 16 bytes for efficiency.
  int buf_stride = ((width_ext + 3) & ~3) + 16;

  // The "tl" pointers point at the top-left of the initialised data for the
  // array. Adding 3 here ensures that column 1 is 16-byte aligned.
  int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3;

  // The "0" pointers are (-SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note
  // there's a zero row and column in A, B (integral images), so we move down
  // and right one for them.
  const int buf_diag_border =
      SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT;

  int32_t *A0 = Atl + 1 + buf_stride;
  int32_t *B0 = Btl + 1 + buf_stride;
  int32_t *C0 = Ctl + 1 + buf_stride;
  int32_t *D0 = Dtl + 1 + buf_stride;

  // Finally, A, B, C, D point at position (0, 0).
  int32_t *A = A0 + buf_diag_border;
  int32_t *B = B0 + buf_diag_border;
  int32_t *C = C0 + buf_diag_border;
  int32_t *D = D0 + buf_diag_border;
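
  // For example, with SGRPROJ_BORDER_VERT == SGRPROJ_BORDER_HORZ == 3 (their
  // values in restoration.h at the time of writing), A ends up 4 rows and 4
  // columns into the buffer at Atl: one row/column for the integral images'
  // zero border plus three for the source padding.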

  const int dgd_diag_border =
      SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT;
  const uint8_t *dgd0 = dgd8 - dgd_diag_border;

  // Generate integral images from the input. C will contain sums of squares; D
  // will contain just sums
  if (highbd)
    integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext,
                           height_ext, Ctl, Dtl, buf_stride);
  else
    integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl,
                    buf_stride);

  const sgr_params_type *const params = &sgr_params[sgr_params_idx];
  // Write to flt0 and flt1
  // If params->r == 0 we skip the corresponding filter. We only allow one of
  // the radii to be 0, as having both equal to 0 would be equivalent to
  // skipping SGR entirely.
  assert(!(params->r[0] == 0 && params->r[1] == 0));
  assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
  assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));

  if (params->r[0] > 0) {
    calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth,
                 sgr_params_idx, 0);
    final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride,
                      width, height, highbd);
  }

  if (params->r[1] > 0) {
    calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx,
            1);
    final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
                 height, highbd);
  }
  aom_free(buf);
  return 0;
}

void apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
                                         int height, int stride, int eps,
                                         const int *xqd, uint8_t *dst8,
                                         int dst_stride, int32_t *tmpbuf,
                                         int bit_depth, int highbd) {
  int32_t *flt0 = tmpbuf;
  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
  assert(width * height <= RESTORATION_UNITPELS_MAX);
  const int ret = av1_selfguided_restoration_sse4_1(
      dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd);
  (void)ret;
  assert(!ret);
  const sgr_params_type *const params = &sgr_params[eps];
  int xq[2];
  decode_xq(xqd, xq, params);

  __m128i xq0 = _mm_set1_epi32(xq[0]);
  __m128i xq1 = _mm_set1_epi32(xq[1]);

  for (int i = 0; i < height; ++i) {
    // Calculate output in batches of 8 pixels
    for (int j = 0; j < width; j += 8) {
      const int k = i * width + j;
      const int m = i * dst_stride + j;

      const uint8_t *dat8ij = dat8 + i * stride + j;
      __m128i src;
      if (highbd) {
        src = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
      } else {
        src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij));
      }

      const __m128i u = _mm_slli_epi16(src, SGRPROJ_RST_BITS);
      const __m128i u_0 = _mm_cvtepu16_epi32(u);
      const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(u, 8));

      __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
      __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS);
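
      // At this point v = u << SGRPROJ_PRJ_BITS; the branches below add
      // xq0 * (flt0 - u) and xq1 * (flt1 - u), so each lane ends up holding
      //   v = (u << SGRPROJ_PRJ_BITS) + xq0 * (f0 - u) + xq1 * (f1 - u)
      // before the final rounding shift.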

      if (params->r[0] > 0) {
        const __m128i f1_0 = _mm_sub_epi32(xx_loadu_128(&flt0[k]), u_0);
        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq0, f1_0));

        const __m128i f1_1 = _mm_sub_epi32(xx_loadu_128(&flt0[k + 4]), u_1);
        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq0, f1_1));
      }

      if (params->r[1] > 0) {
        const __m128i f2_0 = _mm_sub_epi32(xx_loadu_128(&flt1[k]), u_0);
        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq1, f2_0));

        const __m128i f2_1 = _mm_sub_epi32(xx_loadu_128(&flt1[k + 4]), u_1);
        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq1, f2_1));
      }

      const __m128i rounding =
          round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding),
                                         SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding),
                                         SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);

      if (highbd) {
        // Pack into 16 bits and clamp to [0, 2^bit_depth)
        const __m128i tmp = _mm_packus_epi32(w_0, w_1);
        const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
        const __m128i res = _mm_min_epi16(tmp, max);
        xx_storeu_128(CONVERT_TO_SHORTPTR(dst8 + m), res);
      } else {
        // Pack into 8 bits and clamp to [0, 256)
        const __m128i tmp = _mm_packs_epi32(w_0, w_1);
        const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */);
        xx_storel_64(dst8 + m, res);
      }
    }
  }
}
