/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>  // SSE2
#include <tmmintrin.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/x86/synonyms.h"

void aom_var_filter_block2d_bil_first_pass_ssse3(
    const uint8_t *a, uint16_t *b, unsigned int src_pixels_per_line,
    unsigned int pixel_step, unsigned int output_height,
    unsigned int output_width, const uint8_t *filter);

void aom_var_filter_block2d_bil_second_pass_ssse3(
    const uint16_t *a, uint8_t *b, unsigned int src_pixels_per_line,
    unsigned int pixel_step, unsigned int output_height,
    unsigned int output_width, const uint8_t *filter);

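// Compute the distance-weighted average of the 16 byte pairs held in *p0 and
// *p1 and store the 16 result bytes at 'result'. For each byte i this is the
// scalar computation
//   avg[i] = (w0 * p0[i] + w1 * p1[i] + ((1 << DIST_PRECISION_BITS) >> 1)) >>
//            DIST_PRECISION_BITS;
// where *w holds the interleaved byte weights (w0, w1) and *r holds the
// rounding constant broadcast to every 16-bit lane.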
static INLINE void compute_dist_wtd_avg(__m128i *p0, __m128i *p1,
                                        const __m128i *w, const __m128i *r,
                                        void *const result) {
  __m128i p_lo = _mm_unpacklo_epi8(*p0, *p1);
  __m128i mult_lo = _mm_maddubs_epi16(p_lo, *w);
  __m128i round_lo = _mm_add_epi16(mult_lo, *r);
  __m128i shift_lo = _mm_srai_epi16(round_lo, DIST_PRECISION_BITS);

  __m128i p_hi = _mm_unpackhi_epi8(*p0, *p1);
  __m128i mult_hi = _mm_maddubs_epi16(p_hi, *w);
  __m128i round_hi = _mm_add_epi16(mult_hi, *r);
  __m128i shift_hi = _mm_srai_epi16(round_hi, DIST_PRECISION_BITS);

  xx_storeu_128(result, _mm_packus_epi16(shift_lo, shift_hi));
}

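// Form the distance-weighted compound prediction
//   comp_pred[i] = (fwd_offset * ref[i] + bck_offset * pred[i] + round) >>
//                  DIST_PRECISION_BITS
// for a width x height block. comp_pred and pred are contiguous (stride equal
// to width), while ref is read with ref_stride. Pixels are processed 16 at a
// time: one row of 16, two rows of 8, or four rows of 4, depending on width.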
void aom_dist_wtd_comp_avg_pred_ssse3(uint8_t *comp_pred, const uint8_t *pred,
                                      int width, int height, const uint8_t *ref,
                                      int ref_stride,
                                      const DIST_WTD_COMP_PARAMS *jcp_param) {
  int i;
  const int8_t w0 = (int8_t)jcp_param->fwd_offset;
  const int8_t w1 = (int8_t)jcp_param->bck_offset;
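  // Interleave the byte weights as (w0, w1, w0, w1, ...): _mm_set_epi8 takes
  // its arguments from the most significant byte down, so listing (w1, w0,
  // ...) puts w0 in the even lanes. After the ref and pred bytes are
  // interleaved by _mm_unpacklo_epi8/_mm_unpackhi_epi8, _mm_maddubs_epi16
  // then yields w0 * ref + w1 * pred in each 16-bit lane.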
  const __m128i w = _mm_set_epi8(w1, w0, w1, w0, w1, w0, w1, w0, w1, w0, w1, w0,
                                 w1, w0, w1, w0);
  const int16_t round = (int16_t)((1 << DIST_PRECISION_BITS) >> 1);
  const __m128i r = _mm_set1_epi16(round);

  if (width >= 16) {
    // Read 16 pixels, one row at a time
    assert(!(width & 15));
    for (i = 0; i < height; ++i) {
      int j;
      for (j = 0; j < width; j += 16) {
        __m128i p0 = xx_loadu_128(ref);
        __m128i p1 = xx_loadu_128(pred);

        compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);

        comp_pred += 16;
        pred += 16;
        ref += 16;
      }
      ref += ref_stride - width;
    }
  } else if (width >= 8) {
    // Read 8 pixels, two rows at a time
    assert(!(width & 7));
    assert(!(height & 1));
    for (i = 0; i < height; i += 2) {
      __m128i p0_0 = xx_loadl_64(ref + 0 * ref_stride);
      __m128i p0_1 = xx_loadl_64(ref + 1 * ref_stride);
      __m128i p0 = _mm_unpacklo_epi64(p0_0, p0_1);
      __m128i p1 = xx_loadu_128(pred);

      compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);

      comp_pred += 16;
      pred += 16;
      ref += 2 * ref_stride;
    }
  } else {
    // Read 4 pixels, four rows at a time
    assert(!(width & 3));
    assert(!(height & 3));
    for (i = 0; i < height; i += 4) {
      const int8_t *row0 = (const int8_t *)ref + 0 * ref_stride;
      const int8_t *row1 = (const int8_t *)ref + 1 * ref_stride;
      const int8_t *row2 = (const int8_t *)ref + 2 * ref_stride;
      const int8_t *row3 = (const int8_t *)ref + 3 * ref_stride;

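      // Gather the four 4-byte rows into one 16-byte register.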
      __m128i p0 =
          _mm_setr_epi8(row0[0], row0[1], row0[2], row0[3], row1[0], row1[1],
                        row1[2], row1[3], row2[0], row2[1], row2[2], row2[3],
                        row3[0], row3[1], row3[2], row3[3]);
      __m128i p1 = xx_loadu_128(pred);

      compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);

      comp_pred += 16;
      pred += 16;
      ref += 4 * ref_stride;
    }
  }
}

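// Bilinearly interpolate the W x H block at 'a' to the given sub-pixel offset
// (first pass horizontal into fdata3, second pass vertical into temp2), form
// the distance-weighted average of the result with second_pred, and return
// the variance of that compound prediction against 'b'. fdata3 holds H + 1
// rows because the vertical filter reads one row beyond the block.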
#define DIST_WTD_SUBPIX_AVG_VAR(W, H)                                         \
  uint32_t aom_dist_wtd_sub_pixel_avg_variance##W##x##H##_ssse3(              \
      const uint8_t *a, int a_stride, int xoffset, int yoffset,               \
      const uint8_t *b, int b_stride, uint32_t *sse,                          \
      const uint8_t *second_pred, const DIST_WTD_COMP_PARAMS *jcp_param) {    \
    uint16_t fdata3[(H + 1) * W];                                             \
    uint8_t temp2[H * W];                                                     \
    DECLARE_ALIGNED(16, uint8_t, temp3[H * W]);                               \
                                                                              \
    aom_var_filter_block2d_bil_first_pass_ssse3(                              \
        a, fdata3, a_stride, 1, H + 1, W, bilinear_filters_2t[xoffset]);      \
    aom_var_filter_block2d_bil_second_pass_ssse3(                             \
        fdata3, temp2, W, W, H, W, bilinear_filters_2t[yoffset]);             \
                                                                              \
    aom_dist_wtd_comp_avg_pred_ssse3(temp3, second_pred, W, H, temp2, W,      \
                                     jcp_param);                              \
                                                                              \
    return aom_variance##W##x##H(temp3, W, b, b_stride, sse);                 \
  }

DIST_WTD_SUBPIX_AVG_VAR(128, 128)
DIST_WTD_SUBPIX_AVG_VAR(128, 64)
DIST_WTD_SUBPIX_AVG_VAR(64, 128)
DIST_WTD_SUBPIX_AVG_VAR(64, 64)
DIST_WTD_SUBPIX_AVG_VAR(64, 32)
DIST_WTD_SUBPIX_AVG_VAR(32, 64)
DIST_WTD_SUBPIX_AVG_VAR(32, 32)
DIST_WTD_SUBPIX_AVG_VAR(32, 16)
DIST_WTD_SUBPIX_AVG_VAR(16, 32)
DIST_WTD_SUBPIX_AVG_VAR(16, 16)
DIST_WTD_SUBPIX_AVG_VAR(16, 8)
DIST_WTD_SUBPIX_AVG_VAR(8, 16)
DIST_WTD_SUBPIX_AVG_VAR(8, 8)
DIST_WTD_SUBPIX_AVG_VAR(8, 4)
DIST_WTD_SUBPIX_AVG_VAR(4, 8)
DIST_WTD_SUBPIX_AVG_VAR(4, 4)

#if !CONFIG_REALTIME_ONLY
DIST_WTD_SUBPIX_AVG_VAR(4, 16)
DIST_WTD_SUBPIX_AVG_VAR(16, 4)
DIST_WTD_SUBPIX_AVG_VAR(8, 32)
DIST_WTD_SUBPIX_AVG_VAR(32, 8)
DIST_WTD_SUBPIX_AVG_VAR(16, 64)
DIST_WTD_SUBPIX_AVG_VAR(64, 16)
#endif