/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <math.h>

#include <immintrin.h>
#include "config/av1_rtcd.h"

#include "aom_ports/mem.h"
#include "av1/encoder/corner_match.h"

DECLARE_ALIGNED(16, static const uint8_t, byte_mask[16]) = {
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0
};
#if MATCH_SZ != 13
#error "Need to change byte_mask in corner_match_avx2.c if MATCH_SZ != 13"
#endif

/* Compute corr(im1, im2) * MATCH_SZ * stddev(im1), where the
correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
of each image, centered at (x1, y1) and (x2, y2) respectively.
*/
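/* Concretely, with a_i the MATCH_SZ * MATCH_SZ pixels of the im1 window and
   b_i those of the im2 window, the value computed below is
     (MATCH_SZ_SQ * sum(a_i * b_i) - sum(a_i) * sum(b_i)) /
         sqrt(MATCH_SZ_SQ * sum(b_i * b_i) - sum(b_i) * sum(b_i))
*/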
double compute_cross_correlation_avx2(unsigned char *im1, int stride1, int x1,
                                      int y1, unsigned char *im2, int stride2,
                                      int x2, int y2) {
  int i, stride1_i = 0, stride2_i = 0;
  __m256i temp1, sum_vec, sumsq2_vec, cross_vec, v, v1_1, v2_1;
  const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
  const __m256i zero = _mm256_setzero_si256();
  __m128i v1, v2;

  sum_vec = zero;
  sumsq2_vec = zero;
  cross_vec = zero;

  im1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
  im2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);

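  /* Process the window one row at a time: load 16 bytes from each image and
     mask down to the MATCH_SZ (13) valid pixels. Byte sums are accumulated
     with a SAD against zero; the pixels are also widened to 16 bits so that
     _mm256_madd_epi16 can accumulate the squared and cross products. */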
  for (i = 0; i < MATCH_SZ; ++i) {
    v1 = _mm_and_si128(_mm_loadu_si128((__m128i *)&im1[stride1_i]), mask);
    v1_1 = _mm256_cvtepu8_epi16(v1);
    v2 = _mm_and_si128(_mm_loadu_si128((__m128i *)&im2[stride2_i]), mask);
    v2_1 = _mm256_cvtepu8_epi16(v2);

    v = _mm256_insertf128_si256(_mm256_castsi128_si256(v1), v2, 1);
    sumsq2_vec = _mm256_add_epi32(sumsq2_vec, _mm256_madd_epi16(v2_1, v2_1));

    sum_vec = _mm256_add_epi16(sum_vec, _mm256_sad_epu8(v, zero));
    cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1_1, v2_1));
    stride1_i += stride1;
    stride2_i += stride2;
  }
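  /* Horizontal reduction of the SAD accumulator: each 128-bit lane of sum_vec
     holds two 64-bit partial sums (im1 in the low lane, im2 in the high lane),
     so an 8-byte shift plus an add leaves the im1 window sum in 32-bit
     element 0 and the im2 window sum in 32-bit element 4. */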
  __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);
  sum_vec = _mm256_add_epi32(sum_vec, sum_vec1);
  int sum1_acc = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_vec));
  int sum2_acc = _mm256_extract_epi32(sum_vec, 4);

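  /* Reduce the two madd accumulators together: interleave sumsq2_vec and
     cross_vec at 64-bit granularity and fold the halves together. After the
     final 32-bit shift/add, element 0 of low_sumsq holds the window sum of
     im2 * im2 and element 2 holds the window sum of im1 * im2. */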
  __m256i unp_low = _mm256_unpacklo_epi64(sumsq2_vec, cross_vec);
  __m256i unp_hig = _mm256_unpackhi_epi64(sumsq2_vec, cross_vec);
  temp1 = _mm256_add_epi32(unp_low, unp_hig);

  __m128i low_sumsq = _mm256_castsi256_si128(temp1);
  low_sumsq = _mm_add_epi32(low_sumsq, _mm256_extractf128_si256(temp1, 1));
  low_sumsq = _mm_add_epi32(low_sumsq, _mm_srli_epi64(low_sumsq, 32));
  int sumsq2_acc = _mm_cvtsi128_si32(low_sumsq);
  int cross_acc = _mm_extract_epi32(low_sumsq, 2);

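  /* Scale by the window size so the means never need to be formed explicitly:
     var2 is MATCH_SZ_SQ^2 times the variance of the im2 window and cov is
     MATCH_SZ_SQ^2 times the covariance of the two windows. */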
  int var2 = sumsq2_acc * MATCH_SZ_SQ - sum2_acc * sum2_acc;
  int cov = cross_acc * MATCH_SZ_SQ - sum1_acc * sum2_acc;
  return cov / sqrt((double)var2);
}
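
#if 0
/* Editorial sketch only, not part of libaom and excluded from the build: a
   plain C equivalent of the arithmetic performed by the AVX2 routine above,
   kept here to make the intended result easy to check. The function name is
   made up for this sketch. */
static double compute_cross_correlation_scalar_sketch(
    const unsigned char *im1, int stride1, int x1, int y1,
    const unsigned char *im2, int stride2, int x2, int y2) {
  int i, j;
  int sum1 = 0, sum2 = 0, sumsq2 = 0, cross = 0;
  for (i = 0; i < MATCH_SZ; ++i) {
    for (j = 0; j < MATCH_SZ; ++j) {
      const int v1 =
          im1[(i + y1 - MATCH_SZ_BY2) * stride1 + (j + x1 - MATCH_SZ_BY2)];
      const int v2 =
          im2[(i + y2 - MATCH_SZ_BY2) * stride2 + (j + x2 - MATCH_SZ_BY2)];
      sum1 += v1;        /* sum of the im1 window */
      sum2 += v2;        /* sum of the im2 window */
      sumsq2 += v2 * v2; /* sum of squared im2 pixels */
      cross += v1 * v2;  /* sum of cross products */
    }
  }
  const int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
  const int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
  return cov / sqrt((double)var2);
}
#endif /* 0 */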