/*
 * Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <algorithm>
#include <ostream>

#include "third_party/googletest/src/googletest/include/gtest/gtest.h"

#include "config/aom_dsp_rtcd.h"

#include "test/acm_random.h"
#include "test/register_state_check.h"
#include "test/util.h"

namespace {

using libaom_test::ACMRandom;

using HadamardFunc = void (*)(const int16_t *a, ptrdiff_t a_stride,
                              tran_low_t *b);
// Low precision version of Hadamard Transform
using HadamardLPFunc = void (*)(const int16_t *a, ptrdiff_t a_stride,
                                int16_t *b);
// Low precision version of Hadamard Transform 8x8 - Dual
using HadamardLP8x8DualFunc = void (*)(const int16_t *a, ptrdiff_t a_stride,
                                       int16_t *b);

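// Reference helper: one 4-point Hadamard butterfly pass over a single column
// (elements at stride 4). The >> 1 scaling matches the 4x4 functions under
// test, so the reference output can be compared with them exactly.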
template <typename OutputType>
void Hadamard4x4(const OutputType *a, OutputType *out) {
  OutputType b[8];
  for (int i = 0; i < 4; i += 2) {
    b[i + 0] = (a[i * 4] + a[(i + 1) * 4]) >> 1;
    b[i + 1] = (a[i * 4] - a[(i + 1) * 4]) >> 1;
  }

  out[0] = b[0] + b[2];
  out[1] = b[1] + b[3];
  out[2] = b[0] - b[2];
  out[3] = b[1] - b[3];
}

template <typename OutputType>
void ReferenceHadamard4x4(const int16_t *a, int a_stride, OutputType *b) {
  OutputType input[16];
  OutputType buf[16];
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      input[i * 4 + j] = static_cast<OutputType>(a[i * a_stride + j]);
    }
  }
  for (int i = 0; i < 4; ++i) Hadamard4x4(input + i, buf + i * 4);
  for (int i = 0; i < 4; ++i) Hadamard4x4(buf + i, b + i * 4);

  // Extra transpose to match C and SSE2 behavior (i.e., aom_hadamard_4x4).
  for (int i = 0; i < 4; i++) {
    for (int j = i + 1; j < 4; j++) {
      OutputType temp = b[j * 4 + i];
      b[j * 4 + i] = b[i * 4 + j];
      b[i * 4 + j] = temp;
    }
  }
}

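// Reference helper: one 8-point Hadamard butterfly pass over a single column
// (elements at stride 8). The outputs are written in a permuted order; the
// tests sort the coefficients before comparing, so only the set of output
// values matters.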
template <typename OutputType>
void HadamardLoop(const OutputType *a, OutputType *out) {
  OutputType b[8];
  for (int i = 0; i < 8; i += 2) {
    b[i + 0] = a[i * 8] + a[(i + 1) * 8];
    b[i + 1] = a[i * 8] - a[(i + 1) * 8];
  }
  OutputType c[8];
  for (int i = 0; i < 8; i += 4) {
    c[i + 0] = b[i + 0] + b[i + 2];
    c[i + 1] = b[i + 1] + b[i + 3];
    c[i + 2] = b[i + 0] - b[i + 2];
    c[i + 3] = b[i + 1] - b[i + 3];
  }
  out[0] = c[0] + c[4];
  out[7] = c[1] + c[5];
  out[3] = c[2] + c[6];
  out[4] = c[3] + c[7];
  out[2] = c[0] - c[4];
  out[6] = c[1] - c[5];
  out[1] = c[2] - c[6];
  out[5] = c[3] - c[7];
}

template <typename OutputType>
void ReferenceHadamard8x8(const int16_t *a, int a_stride, OutputType *b) {
  OutputType input[64];
  OutputType buf[64];
  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 8; ++j) {
      input[i * 8 + j] = static_cast<OutputType>(a[i * a_stride + j]);
    }
  }
  for (int i = 0; i < 8; ++i) HadamardLoop(input + i, buf + i * 8);
  for (int i = 0; i < 8; ++i) HadamardLoop(buf + i, b + i * 8);

  // Extra transpose to match SSE2 behavior (i.e., aom_hadamard_8x8 and
  // aom_hadamard_lp_8x8).
  for (int i = 0; i < 8; i++) {
    for (int j = i + 1; j < 8; j++) {
      OutputType temp = b[j * 8 + i];
      b[j * 8 + i] = b[i * 8 + j];
      b[i * 8 + j] = temp;
    }
  }
}

template <typename OutputType>
void ReferenceHadamard8x8Dual(const int16_t *a, int a_stride, OutputType *b) {
  /* The source is an 8x16 block. The destination is rearranged to 8x16.
   * Input is 9 bit. */
  ReferenceHadamard8x8(a, a_stride, b);
  ReferenceHadamard8x8(a + 8, a_stride, b + 64);
}

template <typename OutputType>
void ReferenceHadamard16x16(const int16_t *a, int a_stride, OutputType *b,
                            bool shift) {
  /* The source is a 16x16 block. The destination is rearranged to 8x32.
   * Input is 9 bit. */
  ReferenceHadamard8x8(a + 0 + 0 * a_stride, a_stride, b + 0);
  ReferenceHadamard8x8(a + 8 + 0 * a_stride, a_stride, b + 64);
  ReferenceHadamard8x8(a + 0 + 8 * a_stride, a_stride, b + 128);
  ReferenceHadamard8x8(a + 8 + 8 * a_stride, a_stride, b + 192);

  /* Overlay the 8x8 blocks and combine. */
  for (int i = 0; i < 64; ++i) {
    /* 8x8 steps the range up to 15 bits. */
    const OutputType a0 = b[0];
    const OutputType a1 = b[64];
    const OutputType a2 = b[128];
    const OutputType a3 = b[192];

    /* Prevent the result from escaping int16_t. */
    const OutputType b0 = (a0 + a1) >> 1;
    const OutputType b1 = (a0 - a1) >> 1;
    const OutputType b2 = (a2 + a3) >> 1;
    const OutputType b3 = (a2 - a3) >> 1;

    /* Store a 16 bit value. */
    b[0] = b0 + b2;
    b[64] = b1 + b3;
    b[128] = b0 - b2;
    b[192] = b1 - b3;

    ++b;
  }

  if (shift) {
    b -= 64;
    // Extra shift to match aom_hadamard_16x16_c and aom_hadamard_16x16_avx2.
    for (int i = 0; i < 16; i++) {
      for (int j = 0; j < 4; j++) {
        OutputType temp = b[i * 16 + 4 + j];
        b[i * 16 + 4 + j] = b[i * 16 + 8 + j];
        b[i * 16 + 8 + j] = temp;
      }
    }
  }
}

template <typename OutputType>
void ReferenceHadamard32x32(const int16_t *a, int a_stride, OutputType *b,
                            bool shift) {
  ReferenceHadamard16x16(a + 0 + 0 * a_stride, a_stride, b + 0, shift);
  ReferenceHadamard16x16(a + 16 + 0 * a_stride, a_stride, b + 256, shift);
  ReferenceHadamard16x16(a + 0 + 16 * a_stride, a_stride, b + 512, shift);
  ReferenceHadamard16x16(a + 16 + 16 * a_stride, a_stride, b + 768, shift);

  for (int i = 0; i < 256; ++i) {
    const OutputType a0 = b[0];
    const OutputType a1 = b[256];
    const OutputType a2 = b[512];
    const OutputType a3 = b[768];

    const OutputType b0 = (a0 + a1) >> 2;
    const OutputType b1 = (a0 - a1) >> 2;
    const OutputType b2 = (a2 + a3) >> 2;
    const OutputType b3 = (a2 - a3) >> 2;

    b[0] = b0 + b2;
    b[256] = b1 + b3;
    b[512] = b0 - b2;
    b[768] = b1 - b3;

    ++b;
  }
}

template <typename OutputType>
void ReferenceHadamard(const int16_t *a, int a_stride, OutputType *b, int bw,
                       int bh, bool shift) {
  if (bw == 32 && bh == 32) {
    ReferenceHadamard32x32(a, a_stride, b, shift);
  } else if (bw == 16 && bh == 16) {
    ReferenceHadamard16x16(a, a_stride, b, shift);
  } else if (bw == 8 && bh == 8) {
    ReferenceHadamard8x8(a, a_stride, b);
  } else if (bw == 4 && bh == 4) {
    ReferenceHadamard4x4(a, a_stride, b);
  } else if (bw == 8 && bh == 16) {
    ReferenceHadamard8x8Dual(a, a_stride, b);
  } else {
    GTEST_FAIL() << "Invalid Hadamard transform size " << bw << "x" << bh
                 << std::endl;
  }
}

template <typename HadamardFuncType>
struct FuncWithSize {
  FuncWithSize(HadamardFuncType f, int bw, int bh)
      : func(f), block_width(bw), block_height(bh) {}
  HadamardFuncType func;
  int block_width;
  int block_height;
};

using HadamardFuncWithSize = FuncWithSize<HadamardFunc>;
using HadamardLPFuncWithSize = FuncWithSize<HadamardLPFunc>;
using HadamardLP8x8DualFuncWithSize = FuncWithSize<HadamardLP8x8DualFunc>;

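// Shared fixture for all the Hadamard tests, parameterized on the coefficient
// type and the function-pointer type. do_shift selects whether the reference
// applies the extra coefficient reordering used by the full-precision 16x16
// and 32x32 transforms (see ReferenceHadamard16x16); the low-precision
// variants do not apply it.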
template <typename OutputType, typename HadamardFuncType>
class HadamardTestBase
    : public ::testing::TestWithParam<FuncWithSize<HadamardFuncType>> {
 public:
  HadamardTestBase(const FuncWithSize<HadamardFuncType> &func_param,
                   bool do_shift) {
    h_func_ = func_param.func;
    bw_ = func_param.block_width;
    bh_ = func_param.block_height;
    shift_ = do_shift;
  }

  void SetUp() override { rnd_.Reset(ACMRandom::DeterministicSeed()); }

  // The Rand() function generates values in the range [-((1 << BitDepth) - 1),
  // (1 << BitDepth) - 1]. This is because the input to the Hadamard transform
  // is the residual pixel, which is defined as 'source pixel - predicted
  // pixel'. Source pixel and predicted pixel take values in the range
  // [0, (1 << BitDepth) - 1] and thus the residual pixel ranges from
  // -((1 << BitDepth) - 1) to ((1 << BitDepth) - 1).
  virtual int16_t Rand() = 0;

  void CompareReferenceRandom() {
    const int kMaxBlockSize = 32 * 32;
    const int block_size = bw_ * bh_;

    DECLARE_ALIGNED(16, int16_t, a[kMaxBlockSize]);
    DECLARE_ALIGNED(16, OutputType, b[kMaxBlockSize]);
    memset(a, 0, sizeof(a));
    memset(b, 0, sizeof(b));

    OutputType b_ref[kMaxBlockSize];
    memset(b_ref, 0, sizeof(b_ref));

    for (int i = 0; i < block_size; ++i) a[i] = Rand();
    ReferenceHadamard(a, bw_, b_ref, bw_, bh_, shift_);
    API_REGISTER_STATE_CHECK(h_func_(a, bw_, b));

    // The order of the output is not important. Sort before checking.
    std::sort(b, b + block_size);
    std::sort(b_ref, b_ref + block_size);
    EXPECT_EQ(memcmp(b, b_ref, sizeof(b)), 0);
  }

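  // Feed the maximum-magnitude residuals, +/-((1 << kBitDepth) - 1), to both
  // the reference and the function under test.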
  void CompareReferenceExtreme() {
    const int kMaxBlockSize = 32 * 32;
    const int block_size = bw_ * bh_;
    const int kBitDepth = 8;
    DECLARE_ALIGNED(16, int16_t, a[kMaxBlockSize]);
    DECLARE_ALIGNED(16, OutputType, b[kMaxBlockSize]);
    memset(b, 0, sizeof(b));

    OutputType b_ref[kMaxBlockSize];
    memset(b_ref, 0, sizeof(b_ref));
    for (int i = 0; i < 2; ++i) {
      const int sign = (i == 0) ? 1 : -1;
      for (int j = 0; j < block_size; ++j) a[j] = sign * ((1 << kBitDepth) - 1);

      ReferenceHadamard(a, bw_, b_ref, bw_, bh_, shift_);
      API_REGISTER_STATE_CHECK(h_func_(a, bw_, b));

      // The order of the output is not important. Sort before checking.
      std::sort(b, b + block_size);
      std::sort(b_ref, b_ref + block_size);
      EXPECT_EQ(memcmp(b, b_ref, sizeof(b)), 0);
    }
  }

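  // Check the tested function against the reference for source strides
  // 8, 16, ..., 56 to make sure the stride argument is handled correctly.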
  void VaryStride() {
    const int kMaxBlockSize = 32 * 32;
    const int block_size = bw_ * bh_;

    DECLARE_ALIGNED(16, int16_t, a[kMaxBlockSize * 8]);
    DECLARE_ALIGNED(16, OutputType, b[kMaxBlockSize]);
    memset(a, 0, sizeof(a));
    for (int i = 0; i < block_size * 8; ++i) a[i] = Rand();

    OutputType b_ref[kMaxBlockSize];
    for (int i = 8; i < 64; i += 8) {
      memset(b, 0, sizeof(b));
      memset(b_ref, 0, sizeof(b_ref));

      ReferenceHadamard(a, i, b_ref, bw_, bh_, shift_);
      API_REGISTER_STATE_CHECK(h_func_(a, i, b));

      // The order of the output is not important. Sort before checking.
      std::sort(b, b + block_size);
      std::sort(b_ref, b_ref + block_size);
      EXPECT_EQ(0, memcmp(b, b_ref, sizeof(b)));
    }
  }

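  // Time `times` calls to the function under test on a constant input block.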
  void SpeedTest(int times) {
    const int kMaxBlockSize = 32 * 32;
    DECLARE_ALIGNED(16, int16_t, input[kMaxBlockSize]);
    DECLARE_ALIGNED(16, OutputType, output[kMaxBlockSize]);
    memset(input, 1, sizeof(input));
    memset(output, 0, sizeof(output));

    aom_usec_timer timer;
    aom_usec_timer_start(&timer);
    for (int i = 0; i < times; ++i) {
      h_func_(input, bw_, output);
    }
    aom_usec_timer_mark(&timer);

    const int elapsed_time = static_cast<int>(aom_usec_timer_elapsed(&timer));
    printf("Hadamard%dx%d[%12d runs]: %d us\n", bw_, bh_, times, elapsed_time);
  }

 protected:
  ACMRandom rnd_;

 private:
  HadamardFuncType h_func_;
  int bw_;
  int bh_;
  bool shift_;
};

class HadamardLowbdTest : public HadamardTestBase<tran_low_t, HadamardFunc> {
 public:
  HadamardLowbdTest() : HadamardTestBase(GetParam(), /*do_shift=*/true) {}
  // Use values between -255 (0xFF01) and 255 (0x00FF)
  int16_t Rand() override {
    int16_t src = rnd_.Rand8();
    int16_t pred = rnd_.Rand8();
    return src - pred;
  }
};

TEST_P(HadamardLowbdTest, CompareReferenceRandom) { CompareReferenceRandom(); }

TEST_P(HadamardLowbdTest, CompareReferenceExtreme) {
  CompareReferenceExtreme();
}

TEST_P(HadamardLowbdTest, VaryStride) { VaryStride(); }

TEST_P(HadamardLowbdTest, DISABLED_SpeedTest) { SpeedTest(1000000); }

INSTANTIATE_TEST_SUITE_P(
    C, HadamardLowbdTest,
    ::testing::Values(HadamardFuncWithSize(&aom_hadamard_4x4_c, 4, 4),
                      HadamardFuncWithSize(&aom_hadamard_8x8_c, 8, 8),
                      HadamardFuncWithSize(&aom_hadamard_16x16_c, 16, 16),
                      HadamardFuncWithSize(&aom_hadamard_32x32_c, 32, 32)));

#if HAVE_SSE2
INSTANTIATE_TEST_SUITE_P(
    SSE2, HadamardLowbdTest,
    ::testing::Values(HadamardFuncWithSize(&aom_hadamard_4x4_sse2, 4, 4),
                      HadamardFuncWithSize(&aom_hadamard_8x8_sse2, 8, 8),
                      HadamardFuncWithSize(&aom_hadamard_16x16_sse2, 16, 16),
                      HadamardFuncWithSize(&aom_hadamard_32x32_sse2, 32, 32)));
#endif  // HAVE_SSE2

#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(
    AVX2, HadamardLowbdTest,
    ::testing::Values(HadamardFuncWithSize(&aom_hadamard_16x16_avx2, 16, 16),
                      HadamardFuncWithSize(&aom_hadamard_32x32_avx2, 32, 32)));
#endif  // HAVE_AVX2

// TODO(aomedia:3314): Disable the NEON unit test for now, since the hadamard
// 16x16 NEON implementation needs modifications to match the C/AVX2 behavior.
#if HAVE_NEON
INSTANTIATE_TEST_SUITE_P(
    NEON, HadamardLowbdTest,
    ::testing::Values(HadamardFuncWithSize(&aom_hadamard_4x4_neon, 4, 4),
                      HadamardFuncWithSize(&aom_hadamard_8x8_neon, 8, 8),
                      HadamardFuncWithSize(&aom_hadamard_16x16_neon, 16, 16),
                      HadamardFuncWithSize(&aom_hadamard_32x32_neon, 32, 32)));
#endif  // HAVE_NEON

#if CONFIG_AV1_HIGHBITDEPTH
class HadamardHighbdTest : public HadamardTestBase<tran_low_t, HadamardFunc> {
 protected:
  HadamardHighbdTest() : HadamardTestBase(GetParam(), /*do_shift=*/true) {}
  // Use values between -4095 (0xF001) and 4095 (0x0FFF)
  int16_t Rand() override {
    int16_t src = rnd_.Rand12();
    int16_t pred = rnd_.Rand12();
    return src - pred;
  }
};

TEST_P(HadamardHighbdTest, CompareReferenceRandom) { CompareReferenceRandom(); }

TEST_P(HadamardHighbdTest, VaryStride) { VaryStride(); }

TEST_P(HadamardHighbdTest, DISABLED_Speed) {
  SpeedTest(10);
  SpeedTest(10000);
  SpeedTest(10000000);
}

INSTANTIATE_TEST_SUITE_P(
    C, HadamardHighbdTest,
    ::testing::Values(
        HadamardFuncWithSize(&aom_highbd_hadamard_8x8_c, 8, 8),
        HadamardFuncWithSize(&aom_highbd_hadamard_16x16_c, 16, 16),
        HadamardFuncWithSize(&aom_highbd_hadamard_32x32_c, 32, 32)));

#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(
    AVX2, HadamardHighbdTest,
    ::testing::Values(
        HadamardFuncWithSize(&aom_highbd_hadamard_8x8_avx2, 8, 8),
        HadamardFuncWithSize(&aom_highbd_hadamard_16x16_avx2, 16, 16),
        HadamardFuncWithSize(&aom_highbd_hadamard_32x32_avx2, 32, 32)));
#endif  // HAVE_AVX2

#if HAVE_NEON
INSTANTIATE_TEST_SUITE_P(
    NEON, HadamardHighbdTest,
    ::testing::Values(
        HadamardFuncWithSize(&aom_highbd_hadamard_8x8_neon, 8, 8),
        HadamardFuncWithSize(&aom_highbd_hadamard_16x16_neon, 16, 16),
        HadamardFuncWithSize(&aom_highbd_hadamard_32x32_neon, 32, 32)));
#endif  // HAVE_NEON

#endif  // CONFIG_AV1_HIGHBITDEPTH

// Tests for low precision
class HadamardLowbdLPTest : public HadamardTestBase<int16_t, HadamardLPFunc> {
 public:
  HadamardLowbdLPTest() : HadamardTestBase(GetParam(), /*do_shift=*/false) {}
  // Use values between -255 (0xFF01) and 255 (0x00FF)
  int16_t Rand() override {
    int16_t src = rnd_.Rand8();
    int16_t pred = rnd_.Rand8();
    return src - pred;
  }
};

TEST_P(HadamardLowbdLPTest, CompareReferenceRandom) {
  CompareReferenceRandom();
}

TEST_P(HadamardLowbdLPTest, VaryStride) { VaryStride(); }

TEST_P(HadamardLowbdLPTest, DISABLED_SpeedTest) { SpeedTest(1000000); }

INSTANTIATE_TEST_SUITE_P(
    C, HadamardLowbdLPTest,
    ::testing::Values(HadamardLPFuncWithSize(&aom_hadamard_lp_8x8_c, 8, 8),
                      HadamardLPFuncWithSize(&aom_hadamard_lp_16x16_c, 16,
                                             16)));

#if HAVE_SSE2
INSTANTIATE_TEST_SUITE_P(
    SSE2, HadamardLowbdLPTest,
    ::testing::Values(HadamardLPFuncWithSize(&aom_hadamard_lp_8x8_sse2, 8, 8),
                      HadamardLPFuncWithSize(&aom_hadamard_lp_16x16_sse2, 16,
                                             16)));
#endif  // HAVE_SSE2

#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(AVX2, HadamardLowbdLPTest,
                         ::testing::Values(HadamardLPFuncWithSize(
                             &aom_hadamard_lp_16x16_avx2, 16, 16)));
#endif  // HAVE_AVX2

#if HAVE_NEON
INSTANTIATE_TEST_SUITE_P(
    NEON, HadamardLowbdLPTest,
    ::testing::Values(HadamardLPFuncWithSize(&aom_hadamard_lp_8x8_neon, 8, 8),
                      HadamardLPFuncWithSize(&aom_hadamard_lp_16x16_neon, 16,
                                             16)));
#endif  // HAVE_NEON

// Tests for 8x8 dual low precision
class HadamardLowbdLP8x8DualTest
    : public HadamardTestBase<int16_t, HadamardLP8x8DualFunc> {
 public:
  HadamardLowbdLP8x8DualTest()
      : HadamardTestBase(GetParam(), /*do_shift=*/false) {}
  // Use values between -255 (0xFF01) and 255 (0x00FF)
  int16_t Rand() override {
    int16_t src = rnd_.Rand8();
    int16_t pred = rnd_.Rand8();
    return src - pred;
  }
};

TEST_P(HadamardLowbdLP8x8DualTest, CompareReferenceRandom) {
  CompareReferenceRandom();
}

TEST_P(HadamardLowbdLP8x8DualTest, VaryStride) { VaryStride(); }

TEST_P(HadamardLowbdLP8x8DualTest, DISABLED_SpeedTest) { SpeedTest(1000000); }

INSTANTIATE_TEST_SUITE_P(C, HadamardLowbdLP8x8DualTest,
                         ::testing::Values(HadamardLP8x8DualFuncWithSize(
                             &aom_hadamard_lp_8x8_dual_c, 8, 16)));

#if HAVE_SSE2
INSTANTIATE_TEST_SUITE_P(SSE2, HadamardLowbdLP8x8DualTest,
                         ::testing::Values(HadamardLP8x8DualFuncWithSize(
                             &aom_hadamard_lp_8x8_dual_sse2, 8, 16)));
#endif  // HAVE_SSE2

#if HAVE_AVX2
INSTANTIATE_TEST_SUITE_P(AVX2, HadamardLowbdLP8x8DualTest,
                         ::testing::Values(HadamardLP8x8DualFuncWithSize(
                             &aom_hadamard_lp_8x8_dual_avx2, 8, 16)));
#endif  // HAVE_AVX2

#if HAVE_NEON
INSTANTIATE_TEST_SUITE_P(NEON, HadamardLowbdLP8x8DualTest,
                         ::testing::Values(HadamardLP8x8DualFuncWithSize(
                             &aom_hadamard_lp_8x8_dual_neon, 8, 16)));
#endif  // HAVE_NEON

}  // namespace