/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include <smmintrin.h>

#include "config/av1_rtcd.h"

#include "aom_ports/mem.h"
#include "av1/encoder/corner_match.h"
24 DECLARE_ALIGNED(16, static const uint8_t, byte_mask[16]) = {
25   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0
26 };
27 #if MATCH_SZ != 13
28 #error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
29 #endif
30 
31 /* Compute corr(im1, im2) * MATCH_SZ * stddev(im1), where the
32    correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
33    of each image, centered at (x1, y1) and (x2, y2) respectively.
34 */
compute_cross_correlation_sse4_1(unsigned char * im1,int stride1,int x1,int y1,unsigned char * im2,int stride2,int x2,int y2)35 double compute_cross_correlation_sse4_1(unsigned char *im1, int stride1, int x1,
36                                         int y1, unsigned char *im2, int stride2,
37                                         int x2, int y2) {
38   int i;
39   // 2 16-bit partial sums in lanes 0, 4 (== 2 32-bit partial sums in lanes 0,
40   // 2)
41   __m128i sum1_vec = _mm_setzero_si128();
42   __m128i sum2_vec = _mm_setzero_si128();
43   // 4 32-bit partial sums of squares
44   __m128i sumsq2_vec = _mm_setzero_si128();
45   __m128i cross_vec = _mm_setzero_si128();
46 
47   const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
48   const __m128i zero = _mm_setzero_si128();
49 
50   im1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
51   im2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
52 
53   for (i = 0; i < MATCH_SZ; ++i) {
54     const __m128i v1 =
55         _mm_and_si128(_mm_loadu_si128((__m128i *)&im1[i * stride1]), mask);
56     const __m128i v2 =
57         _mm_and_si128(_mm_loadu_si128((__m128i *)&im2[i * stride2]), mask);
58 
59     // Using the 'sad' intrinsic here is a bit faster than adding
60     // v1_l + v1_r and v2_l + v2_r, plus it avoids the need for a 16->32 bit
61     // conversion step later, for a net speedup of ~10%
62     sum1_vec = _mm_add_epi16(sum1_vec, _mm_sad_epu8(v1, zero));
63     sum2_vec = _mm_add_epi16(sum2_vec, _mm_sad_epu8(v2, zero));
64 
65     const __m128i v1_l = _mm_cvtepu8_epi16(v1);
66     const __m128i v1_r = _mm_cvtepu8_epi16(_mm_srli_si128(v1, 8));
67     const __m128i v2_l = _mm_cvtepu8_epi16(v2);
68     const __m128i v2_r = _mm_cvtepu8_epi16(_mm_srli_si128(v2, 8));
69 
70     sumsq2_vec = _mm_add_epi32(
71         sumsq2_vec,
72         _mm_add_epi32(_mm_madd_epi16(v2_l, v2_l), _mm_madd_epi16(v2_r, v2_r)));
73     cross_vec = _mm_add_epi32(
74         cross_vec,
75         _mm_add_epi32(_mm_madd_epi16(v1_l, v2_l), _mm_madd_epi16(v1_r, v2_r)));
76   }
77 
78   // Now we can treat the four registers (sum1_vec, sum2_vec, sumsq2_vec,
79   // cross_vec)
80   // as holding 4 32-bit elements each, which we want to sum horizontally.
81   // We do this by transposing and then summing vertically.
82   __m128i tmp_0 = _mm_unpacklo_epi32(sum1_vec, sum2_vec);
83   __m128i tmp_1 = _mm_unpackhi_epi32(sum1_vec, sum2_vec);
84   __m128i tmp_2 = _mm_unpacklo_epi32(sumsq2_vec, cross_vec);
85   __m128i tmp_3 = _mm_unpackhi_epi32(sumsq2_vec, cross_vec);
86 
87   __m128i tmp_4 = _mm_unpacklo_epi64(tmp_0, tmp_2);
88   __m128i tmp_5 = _mm_unpackhi_epi64(tmp_0, tmp_2);
89   __m128i tmp_6 = _mm_unpacklo_epi64(tmp_1, tmp_3);
90   __m128i tmp_7 = _mm_unpackhi_epi64(tmp_1, tmp_3);
91 
92   __m128i res =
93       _mm_add_epi32(_mm_add_epi32(tmp_4, tmp_5), _mm_add_epi32(tmp_6, tmp_7));
94 
95   int sum1 = _mm_extract_epi32(res, 0);
96   int sum2 = _mm_extract_epi32(res, 1);
97   int sumsq2 = _mm_extract_epi32(res, 2);
98   int cross = _mm_extract_epi32(res, 3);
99 
100   int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
101   int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
102   return cov / sqrt((double)var2);
103 }
104