1 /*
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
#include <stdlib.h>
#include <string.h>  // memset(), used throughout the test bodies
#include <new>

#include "third_party/googletest/src/include/gtest/gtest.h"

#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"

#include "vpx/vpx_integer.h"
#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_VP8_ENCODER
# include "./vp8_rtcd.h"
# include "vp8/common/variance.h"
#endif
#if CONFIG_VP9_ENCODER
# include "./vp9_rtcd.h"
# include "vp9/encoder/vp9_variance.h"
#endif
30
31 namespace {
32
33 using ::std::tr1::get;
34 using ::std::tr1::make_tuple;
35 using ::std::tr1::tuple;
36 using libvpx_test::ACMRandom;
37
// Scalar reference implementation of block variance. l2w/l2h are the log2
// of the block width/height; both buffers are tightly packed (stride ==
// width). Writes the sum of squared errors to *sse_ptr and returns the
// variance: SSE - SE^2 / (w * h).
static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
                                 int l2w, int l2h, unsigned int *sse_ptr) {
  const int width = 1 << l2w;
  const int height = 1 << l2h;
  int sum = 0;
  unsigned int sum_sq = 0;
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      const int d = ref[width * row + col] - src[width * row + col];
      sum += d;
      sum_sq += d * d;
    }
  }
  *sse_ptr = sum_sq;
  // 64-bit intermediate: sum * sum can exceed 32 bits for large blocks.
  return sum_sq - (((int64_t) sum * sum) >> (l2w + l2h));
}
53
// Scalar reference implementation of sub-pixel variance. Bilinearly
// interpolates the (w+1)x(h+1) reference block at a 1/16-pel offset
// (xoff, yoff) — hence ref's stride is w + 1 — and computes the variance
// against src (stride w). Writes SSE to *sse_ptr, returns the variance.
static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
                                        int l2w, int l2h, int xoff, int yoff,
                                        unsigned int *sse_ptr) {
  const int width = 1 << l2w;
  const int height = 1 << l2h;
  const int ref_stride = width + 1;
  int sum = 0;
  unsigned int sum_sq = 0;
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      // Two horizontal taps, then one vertical tap; each stage rounds to
      // 4 fractional bits (16th-pel precision).
      const int tl = ref[ref_stride * row + col];
      const int tr = ref[ref_stride * row + col + 1];
      const int bl = ref[ref_stride * (row + 1) + col];
      const int br = ref[ref_stride * (row + 1) + col + 1];
      const int top = tl + (((tr - tl) * xoff + 8) >> 4);
      const int bot = bl + (((br - bl) * xoff + 8) >> 4);
      const int pred = top + (((bot - top) * yoff + 8) >> 4);
      const int d = pred - src[width * row + col];
      sum += d;
      sum_sq += d * d;
    }
  }
  *sse_ptr = sum_sq;
  return sum_sq - (((int64_t) sum * sum) >> (l2w + l2h));
}
78
79 template<typename VarianceFunctionType>
80 class VarianceTest
81 : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
82 public:
SetUp()83 virtual void SetUp() {
84 const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
85 log2width_ = get<0>(params);
86 width_ = 1 << log2width_;
87 log2height_ = get<1>(params);
88 height_ = 1 << log2height_;
89 variance_ = get<2>(params);
90
91 rnd(ACMRandom::DeterministicSeed());
92 block_size_ = width_ * height_;
93 src_ = new uint8_t[block_size_];
94 ref_ = new uint8_t[block_size_];
95 ASSERT_TRUE(src_ != NULL);
96 ASSERT_TRUE(ref_ != NULL);
97 }
98
TearDown()99 virtual void TearDown() {
100 delete[] src_;
101 delete[] ref_;
102 libvpx_test::ClearSystemState();
103 }
104
105 protected:
106 void ZeroTest();
107 void RefTest();
108 void OneQuarterTest();
109
110 ACMRandom rnd;
111 uint8_t* src_;
112 uint8_t* ref_;
113 int width_, log2width_;
114 int height_, log2height_;
115 int block_size_;
116 VarianceFunctionType variance_;
117 };
118
119 template<typename VarianceFunctionType>
ZeroTest()120 void VarianceTest<VarianceFunctionType>::ZeroTest() {
121 for (int i = 0; i <= 255; ++i) {
122 memset(src_, i, block_size_);
123 for (int j = 0; j <= 255; ++j) {
124 memset(ref_, j, block_size_);
125 unsigned int sse;
126 unsigned int var;
127 REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
128 EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j;
129 }
130 }
131 }
132
133 template<typename VarianceFunctionType>
RefTest()134 void VarianceTest<VarianceFunctionType>::RefTest() {
135 for (int i = 0; i < 10; ++i) {
136 for (int j = 0; j < block_size_; j++) {
137 src_[j] = rnd.Rand8();
138 ref_[j] = rnd.Rand8();
139 }
140 unsigned int sse1, sse2;
141 unsigned int var1;
142 REGISTER_STATE_CHECK(var1 = variance_(src_, width_, ref_, width_, &sse1));
143 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
144 log2height_, &sse2);
145 EXPECT_EQ(sse1, sse2);
146 EXPECT_EQ(var1, var2);
147 }
148 }
149
150 template<typename VarianceFunctionType>
OneQuarterTest()151 void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
152 memset(src_, 255, block_size_);
153 const int half = block_size_ / 2;
154 memset(ref_, 255, half);
155 memset(ref_ + half, 0, half);
156 unsigned int sse;
157 unsigned int var;
158 REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
159 const unsigned int expected = block_size_ * 255 * 255 / 4;
160 EXPECT_EQ(expected, var);
161 }
162
163 #if CONFIG_VP9_ENCODER
164
// Scalar reference implementation of sub-pixel *average* variance: the
// reference is bilinearly interpolated at a 1/16-pel offset (xoff, yoff),
// averaged with second_pred (rounding up), and the variance of the result
// against src is computed. ref has stride w + 1; src/second_pred stride w.
unsigned int subpel_avg_variance_ref(const uint8_t *ref,
                                     const uint8_t *src,
                                     const uint8_t *second_pred,
                                     int l2w, int l2h,
                                     int xoff, int yoff,
                                     unsigned int *sse_ptr) {
  const int width = 1 << l2w;
  const int height = 1 << l2h;
  const int ref_stride = width + 1;
  int sum = 0;
  unsigned int sum_sq = 0;
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      // Bilinear interpolation with 4 fractional bits (16th-pel step).
      const int tl = ref[ref_stride * row + col];
      const int tr = ref[ref_stride * row + col + 1];
      const int bl = ref[ref_stride * (row + 1) + col];
      const int br = ref[ref_stride * (row + 1) + col + 1];
      const int top = tl + (((tr - tl) * xoff + 8) >> 4);
      const int bot = bl + (((br - bl) * xoff + 8) >> 4);
      const int pred = top + (((bot - top) * yoff + 8) >> 4);
      // Average with the second prediction, rounding up.
      const int avg = (pred + second_pred[width * row + col] + 1) >> 1;
      const int d = avg - src[width * row + col];
      sum += d;
      sum_sq += d * d;
    }
  }
  *sse_ptr = sum_sq;
  return sum_sq - (((int64_t) sum * sum) >> (l2w + l2h));
}
192
// Parameterized fixture for sub-pixel variance functions. The test tuple is
// (log2 block width, log2 block height, function under test).
template<typename SubpelVarianceFunctionType>
class SubpelVarianceTest
    : public ::testing::TestWithParam<tuple<int, int,
                                            SubpelVarianceFunctionType> > {
 public:
  virtual void SetUp() {
    const tuple<int, int, SubpelVarianceFunctionType>& params =
        this->GetParam();
    log2width_ = get<0>(params);
    width_ = 1 << log2width_;
    log2height_ = get<1>(params);
    height_ = 1 << log2height_;
    subpel_variance_ = get<2>(params);

    rnd(ACMRandom::DeterministicSeed());  // deterministic seed => reproducible
    block_size_ = width_ * height_;
    // src_/sec_ are 16-byte aligned — presumably an alignment requirement of
    // the SIMD implementations under test (confirm against the kernels).
    // ref_ gets one extra row and column ((w+1)*(h+1) bytes) because the
    // bilinear interpolation reads one sample beyond the block on each axis.
    src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
    sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
    ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
    ASSERT_TRUE(src_ != NULL);
    ASSERT_TRUE(sec_ != NULL);
    ASSERT_TRUE(ref_ != NULL);
  }

  virtual void TearDown() {
    vpx_free(src_);
    delete[] ref_;
    vpx_free(sec_);
    libvpx_test::ClearSystemState();
  }

 protected:
  void RefTest();  // compare against the C reference at all 16x16 offsets

  ACMRandom rnd;
  uint8_t *src_;
  uint8_t *ref_;
  uint8_t *sec_;   // second prediction, used only by the avg specialization
  int width_, log2width_;
  int height_, log2height_;
  int block_size_;
  SubpelVarianceFunctionType subpel_variance_;
};
236
237 template<typename SubpelVarianceFunctionType>
RefTest()238 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
239 for (int x = 0; x < 16; ++x) {
240 for (int y = 0; y < 16; ++y) {
241 for (int j = 0; j < block_size_; j++) {
242 src_[j] = rnd.Rand8();
243 }
244 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
245 ref_[j] = rnd.Rand8();
246 }
247 unsigned int sse1, sse2;
248 unsigned int var1;
249 REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
250 src_, width_, &sse1));
251 const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
252 log2height_, x, y, &sse2);
253 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
254 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
255 }
256 }
257 }
258
259 template<>
RefTest()260 void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
261 for (int x = 0; x < 16; ++x) {
262 for (int y = 0; y < 16; ++y) {
263 for (int j = 0; j < block_size_; j++) {
264 src_[j] = rnd.Rand8();
265 sec_[j] = rnd.Rand8();
266 }
267 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
268 ref_[j] = rnd.Rand8();
269 }
270 unsigned int sse1, sse2;
271 unsigned int var1;
272 REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
273 src_, width_, &sse1, sec_));
274 const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
275 log2width_, log2height_,
276 x, y, &sse2);
277 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
278 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
279 }
280 }
281 }
282
283 #endif // CONFIG_VP9_ENCODER
284
285 // -----------------------------------------------------------------------------
286 // VP8 test cases.
287
namespace vp8 {

#if CONFIG_VP8_ENCODER
typedef VarianceTest<vp8_variance_fn_t> VP8VarianceTest;

TEST_P(VP8VarianceTest, Zero) { ZeroTest(); }
TEST_P(VP8VarianceTest, Ref) { RefTest(); }
TEST_P(VP8VarianceTest, OneQuarter) { OneQuarterTest(); }

// Each tuple is (log2 width, log2 height, function under test). The const
// aliases bind the rtcd function pointers to the vp8_variance_fn_t type
// expected by the test tuple.
const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
INSTANTIATE_TEST_CASE_P(
    C, VP8VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_c),
                      make_tuple(3, 3, variance8x8_c),
                      make_tuple(3, 4, variance8x16_c),
                      make_tuple(4, 3, variance16x8_c),
                      make_tuple(4, 4, variance16x16_c)));

#if HAVE_NEON
// Note: no 4x4 NEON variant is instantiated here.
const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
INSTANTIATE_TEST_CASE_P(
    NEON, VP8VarianceTest,
    ::testing::Values(make_tuple(3, 3, variance8x8_neon),
                      make_tuple(3, 4, variance8x16_neon),
                      make_tuple(4, 3, variance16x8_neon),
                      make_tuple(4, 4, variance16x16_neon)));
#endif  // HAVE_NEON

#if HAVE_MMX
const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
INSTANTIATE_TEST_CASE_P(
    MMX, VP8VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
                      make_tuple(3, 3, variance8x8_mmx),
                      make_tuple(3, 4, variance8x16_mmx),
                      make_tuple(4, 3, variance16x8_mmx),
                      make_tuple(4, 4, variance16x16_mmx)));
#endif  // HAVE_MMX

#if HAVE_SSE2
// "_wmt" (Willamette) is the historical vp8 naming for the SSE2 variants.
const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
INSTANTIATE_TEST_CASE_P(
    SSE2, VP8VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_wmt),
                      make_tuple(3, 3, variance8x8_wmt),
                      make_tuple(3, 4, variance8x16_wmt),
                      make_tuple(4, 3, variance16x8_wmt),
                      make_tuple(4, 4, variance16x16_wmt)));
#endif  // HAVE_SSE2
#endif  // CONFIG_VP8_ENCODER

}  // namespace vp8
355
356 // -----------------------------------------------------------------------------
357 // VP9 test cases.
358
namespace vp9 {

#if CONFIG_VP9_ENCODER
// VP9 exercises three function families: full-pel variance, sub-pixel
// variance, and sub-pixel average variance (with a second prediction).
typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;

TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
TEST_P(VP9VarianceTest, Ref) { RefTest(); }
TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
371
// C reference variance, all 13 VP9 block sizes (4x4 up to 64x64).
// Tuples are (log2 width, log2 height, function under test).
const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
INSTANTIATE_TEST_CASE_P(
    C, VP9VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_c),
                      make_tuple(2, 3, variance4x8_c),
                      make_tuple(3, 2, variance8x4_c),
                      make_tuple(3, 3, variance8x8_c),
                      make_tuple(3, 4, variance8x16_c),
                      make_tuple(4, 3, variance16x8_c),
                      make_tuple(4, 4, variance16x16_c),
                      make_tuple(4, 5, variance16x32_c),
                      make_tuple(5, 4, variance32x16_c),
                      make_tuple(5, 5, variance32x32_c),
                      make_tuple(5, 6, variance32x64_c),
                      make_tuple(6, 5, variance64x32_c),
                      make_tuple(6, 6, variance64x64_c)));
400
// C reference sub-pixel variance, all 13 VP9 block sizes.
const vp9_subpixvariance_fn_t subpel_variance4x4_c =
    vp9_sub_pixel_variance4x4_c;
const vp9_subpixvariance_fn_t subpel_variance4x8_c =
    vp9_sub_pixel_variance4x8_c;
const vp9_subpixvariance_fn_t subpel_variance8x4_c =
    vp9_sub_pixel_variance8x4_c;
const vp9_subpixvariance_fn_t subpel_variance8x8_c =
    vp9_sub_pixel_variance8x8_c;
const vp9_subpixvariance_fn_t subpel_variance8x16_c =
    vp9_sub_pixel_variance8x16_c;
const vp9_subpixvariance_fn_t subpel_variance16x8_c =
    vp9_sub_pixel_variance16x8_c;
const vp9_subpixvariance_fn_t subpel_variance16x16_c =
    vp9_sub_pixel_variance16x16_c;
const vp9_subpixvariance_fn_t subpel_variance16x32_c =
    vp9_sub_pixel_variance16x32_c;
const vp9_subpixvariance_fn_t subpel_variance32x16_c =
    vp9_sub_pixel_variance32x16_c;
const vp9_subpixvariance_fn_t subpel_variance32x32_c =
    vp9_sub_pixel_variance32x32_c;
const vp9_subpixvariance_fn_t subpel_variance32x64_c =
    vp9_sub_pixel_variance32x64_c;
const vp9_subpixvariance_fn_t subpel_variance64x32_c =
    vp9_sub_pixel_variance64x32_c;
const vp9_subpixvariance_fn_t subpel_variance64x64_c =
    vp9_sub_pixel_variance64x64_c;
INSTANTIATE_TEST_CASE_P(
    C, VP9SubpelVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c),
                      make_tuple(2, 3, subpel_variance4x8_c),
                      make_tuple(3, 2, subpel_variance8x4_c),
                      make_tuple(3, 3, subpel_variance8x8_c),
                      make_tuple(3, 4, subpel_variance8x16_c),
                      make_tuple(4, 3, subpel_variance16x8_c),
                      make_tuple(4, 4, subpel_variance16x16_c),
                      make_tuple(4, 5, subpel_variance16x32_c),
                      make_tuple(5, 4, subpel_variance32x16_c),
                      make_tuple(5, 5, subpel_variance32x32_c),
                      make_tuple(5, 6, subpel_variance32x64_c),
                      make_tuple(6, 5, subpel_variance64x32_c),
                      make_tuple(6, 6, subpel_variance64x64_c)));
442
// C reference sub-pixel average variance, all 13 VP9 block sizes.
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
    vp9_sub_pixel_avg_variance4x4_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
    vp9_sub_pixel_avg_variance4x8_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
    vp9_sub_pixel_avg_variance8x4_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
    vp9_sub_pixel_avg_variance8x8_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
    vp9_sub_pixel_avg_variance8x16_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
    vp9_sub_pixel_avg_variance16x8_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
    vp9_sub_pixel_avg_variance16x16_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
    vp9_sub_pixel_avg_variance16x32_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
    vp9_sub_pixel_avg_variance32x16_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
    vp9_sub_pixel_avg_variance32x32_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
    vp9_sub_pixel_avg_variance32x64_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
    vp9_sub_pixel_avg_variance64x32_c;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
    vp9_sub_pixel_avg_variance64x64_c;
INSTANTIATE_TEST_CASE_P(
    C, VP9SubpelAvgVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c),
                      make_tuple(2, 3, subpel_avg_variance4x8_c),
                      make_tuple(3, 2, subpel_avg_variance8x4_c),
                      make_tuple(3, 3, subpel_avg_variance8x8_c),
                      make_tuple(3, 4, subpel_avg_variance8x16_c),
                      make_tuple(4, 3, subpel_avg_variance16x8_c),
                      make_tuple(4, 4, subpel_avg_variance16x16_c),
                      make_tuple(4, 5, subpel_avg_variance16x32_c),
                      make_tuple(5, 4, subpel_avg_variance32x16_c),
                      make_tuple(5, 5, subpel_avg_variance32x32_c),
                      make_tuple(5, 6, subpel_avg_variance32x64_c),
                      make_tuple(6, 5, subpel_avg_variance64x32_c),
                      make_tuple(6, 6, subpel_avg_variance64x64_c)));
484
#if HAVE_MMX
// MMX covers only the five classic block sizes (no VP9-specific sizes).
const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
INSTANTIATE_TEST_CASE_P(
    MMX, VP9VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
                      make_tuple(3, 3, variance8x8_mmx),
                      make_tuple(3, 4, variance8x16_mmx),
                      make_tuple(4, 3, variance16x8_mmx),
                      make_tuple(4, 4, variance16x16_mmx)));
#endif  // HAVE_MMX
499
#if HAVE_SSE2
#if CONFIG_USE_X86INC
// SSE2 variance, all 13 VP9 block sizes.
const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
INSTANTIATE_TEST_CASE_P(
    SSE2, VP9VarianceTest,
    ::testing::Values(make_tuple(2, 2, variance4x4_sse2),
                      make_tuple(2, 3, variance4x8_sse2),
                      make_tuple(3, 2, variance8x4_sse2),
                      make_tuple(3, 3, variance8x8_sse2),
                      make_tuple(3, 4, variance8x16_sse2),
                      make_tuple(4, 3, variance16x8_sse2),
                      make_tuple(4, 4, variance16x16_sse2),
                      make_tuple(4, 5, variance16x32_sse2),
                      make_tuple(5, 4, variance32x16_sse2),
                      make_tuple(5, 5, variance32x32_sse2),
                      make_tuple(5, 6, variance32x64_sse2),
                      make_tuple(6, 5, variance64x32_sse2),
                      make_tuple(6, 6, variance64x64_sse2)));
530
// SSE2 sub-pixel variance. Note the 4-wide sizes use plain SSE ("_sse")
// implementations, per the names below.
const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
    vp9_sub_pixel_variance4x4_sse;
const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
    vp9_sub_pixel_variance4x8_sse;
const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
    vp9_sub_pixel_variance8x4_sse2;
const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
    vp9_sub_pixel_variance8x8_sse2;
const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
    vp9_sub_pixel_variance8x16_sse2;
const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
    vp9_sub_pixel_variance16x8_sse2;
const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
    vp9_sub_pixel_variance16x16_sse2;
const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
    vp9_sub_pixel_variance16x32_sse2;
const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
    vp9_sub_pixel_variance32x16_sse2;
const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
    vp9_sub_pixel_variance32x32_sse2;
const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
    vp9_sub_pixel_variance32x64_sse2;
const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
    vp9_sub_pixel_variance64x32_sse2;
const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
    vp9_sub_pixel_variance64x64_sse2;
INSTANTIATE_TEST_CASE_P(
    SSE2, VP9SubpelVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse),
                      make_tuple(2, 3, subpel_variance4x8_sse),
                      make_tuple(3, 2, subpel_variance8x4_sse2),
                      make_tuple(3, 3, subpel_variance8x8_sse2),
                      make_tuple(3, 4, subpel_variance8x16_sse2),
                      make_tuple(4, 3, subpel_variance16x8_sse2),
                      make_tuple(4, 4, subpel_variance16x16_sse2),
                      make_tuple(4, 5, subpel_variance16x32_sse2),
                      make_tuple(5, 4, subpel_variance32x16_sse2),
                      make_tuple(5, 5, subpel_variance32x32_sse2),
                      make_tuple(5, 6, subpel_variance32x64_sse2),
                      make_tuple(6, 5, subpel_variance64x32_sse2),
                      make_tuple(6, 6, subpel_variance64x64_sse2)));
572
// SSE2 sub-pixel average variance (4-wide sizes are plain SSE).
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
    vp9_sub_pixel_avg_variance4x4_sse;
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
    vp9_sub_pixel_avg_variance4x8_sse;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
    vp9_sub_pixel_avg_variance8x4_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
    vp9_sub_pixel_avg_variance8x8_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
    vp9_sub_pixel_avg_variance8x16_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
    vp9_sub_pixel_avg_variance16x8_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
    vp9_sub_pixel_avg_variance16x16_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
    vp9_sub_pixel_avg_variance16x32_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
    vp9_sub_pixel_avg_variance32x16_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
    vp9_sub_pixel_avg_variance32x32_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
    vp9_sub_pixel_avg_variance32x64_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
    vp9_sub_pixel_avg_variance64x32_sse2;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
    vp9_sub_pixel_avg_variance64x64_sse2;
INSTANTIATE_TEST_CASE_P(
    SSE2, VP9SubpelAvgVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse),
                      make_tuple(2, 3, subpel_avg_variance4x8_sse),
                      make_tuple(3, 2, subpel_avg_variance8x4_sse2),
                      make_tuple(3, 3, subpel_avg_variance8x8_sse2),
                      make_tuple(3, 4, subpel_avg_variance8x16_sse2),
                      make_tuple(4, 3, subpel_avg_variance16x8_sse2),
                      make_tuple(4, 4, subpel_avg_variance16x16_sse2),
                      make_tuple(4, 5, subpel_avg_variance16x32_sse2),
                      make_tuple(5, 4, subpel_avg_variance32x16_sse2),
                      make_tuple(5, 5, subpel_avg_variance32x32_sse2),
                      make_tuple(5, 6, subpel_avg_variance32x64_sse2),
                      make_tuple(6, 5, subpel_avg_variance64x32_sse2),
                      make_tuple(6, 6, subpel_avg_variance64x64_sse2)));
#endif  // CONFIG_USE_X86INC
#endif  // HAVE_SSE2
616
#if HAVE_SSSE3
#if CONFIG_USE_X86INC

// SSSE3 sub-pixel variance, all 13 VP9 block sizes.
const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
    vp9_sub_pixel_variance4x4_ssse3;
const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
    vp9_sub_pixel_variance4x8_ssse3;
const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
    vp9_sub_pixel_variance8x4_ssse3;
const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
    vp9_sub_pixel_variance8x8_ssse3;
const vp9_subpixvariance_fn_t subpel_variance8x16_ssse3 =
    vp9_sub_pixel_variance8x16_ssse3;
const vp9_subpixvariance_fn_t subpel_variance16x8_ssse3 =
    vp9_sub_pixel_variance16x8_ssse3;
const vp9_subpixvariance_fn_t subpel_variance16x16_ssse3 =
    vp9_sub_pixel_variance16x16_ssse3;
const vp9_subpixvariance_fn_t subpel_variance16x32_ssse3 =
    vp9_sub_pixel_variance16x32_ssse3;
const vp9_subpixvariance_fn_t subpel_variance32x16_ssse3 =
    vp9_sub_pixel_variance32x16_ssse3;
const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
    vp9_sub_pixel_variance32x32_ssse3;
const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
    vp9_sub_pixel_variance32x64_ssse3;
const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
    vp9_sub_pixel_variance64x32_ssse3;
const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
    vp9_sub_pixel_variance64x64_ssse3;
INSTANTIATE_TEST_CASE_P(
    SSSE3, VP9SubpelVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3),
                      make_tuple(2, 3, subpel_variance4x8_ssse3),
                      make_tuple(3, 2, subpel_variance8x4_ssse3),
                      make_tuple(3, 3, subpel_variance8x8_ssse3),
                      make_tuple(3, 4, subpel_variance8x16_ssse3),
                      make_tuple(4, 3, subpel_variance16x8_ssse3),
                      make_tuple(4, 4, subpel_variance16x16_ssse3),
                      make_tuple(4, 5, subpel_variance16x32_ssse3),
                      make_tuple(5, 4, subpel_variance32x16_ssse3),
                      make_tuple(5, 5, subpel_variance32x32_ssse3),
                      make_tuple(5, 6, subpel_variance32x64_ssse3),
                      make_tuple(6, 5, subpel_variance64x32_ssse3),
                      make_tuple(6, 6, subpel_variance64x64_ssse3)));
661
// SSSE3 sub-pixel average variance, all 13 VP9 block sizes.
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
    vp9_sub_pixel_avg_variance4x4_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
    vp9_sub_pixel_avg_variance4x8_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
    vp9_sub_pixel_avg_variance8x4_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
    vp9_sub_pixel_avg_variance8x8_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
    vp9_sub_pixel_avg_variance8x16_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
    vp9_sub_pixel_avg_variance16x8_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
    vp9_sub_pixel_avg_variance16x16_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
    vp9_sub_pixel_avg_variance16x32_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
    vp9_sub_pixel_avg_variance32x16_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
    vp9_sub_pixel_avg_variance32x32_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
    vp9_sub_pixel_avg_variance32x64_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
    vp9_sub_pixel_avg_variance64x32_ssse3;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
    vp9_sub_pixel_avg_variance64x64_ssse3;
INSTANTIATE_TEST_CASE_P(
    SSSE3, VP9SubpelAvgVarianceTest,
    ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3),
                      make_tuple(2, 3, subpel_avg_variance4x8_ssse3),
                      make_tuple(3, 2, subpel_avg_variance8x4_ssse3),
                      make_tuple(3, 3, subpel_avg_variance8x8_ssse3),
                      make_tuple(3, 4, subpel_avg_variance8x16_ssse3),
                      make_tuple(4, 3, subpel_avg_variance16x8_ssse3),
                      make_tuple(4, 4, subpel_avg_variance16x16_ssse3),
                      make_tuple(4, 5, subpel_avg_variance16x32_ssse3),
                      make_tuple(5, 4, subpel_avg_variance32x16_ssse3),
                      make_tuple(5, 5, subpel_avg_variance32x32_ssse3),
                      make_tuple(5, 6, subpel_avg_variance32x64_ssse3),
                      make_tuple(6, 5, subpel_avg_variance64x32_ssse3),
                      make_tuple(6, 6, subpel_avg_variance64x64_ssse3)));
#endif  // CONFIG_USE_X86INC
#endif  // HAVE_SSSE3
#endif  // CONFIG_VP9_ENCODER
706
707 } // namespace vp9
708
709 } // namespace
710