/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <cmath>
#include <cstdio>
#include <cstring>
#include <tuple>

#include "third_party/googletest/src/googletest/include/gtest/gtest.h"

#include "aom/aom_integer.h"
#include "aom_ports/aom_timer.h"
#include "av1/encoder/ml.h"
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"
#include "test/util.h"
#include "test/register_state_check.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"

namespace {
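// Signature shared by av1_nn_predict_c() and the optimized variants under
// test.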
typedef void (*NnPredict_Func)(const float *const input_nodes,
                               const NN_CONFIG *const nn_config,
                               int reduce_prec, float *const output);

typedef std::tuple<const NnPredict_Func> NnPredictTestParam;

const float epsilon = 1e-3f;  // Error threshold for functional equivalence

class NnPredictTest : public ::testing::TestWithParam<NnPredictTestParam> {
 public:
  virtual void SetUp() {
    const int MAX_NODES2 = NN_MAX_NODES_PER_LAYER * NN_MAX_NODES_PER_LAYER;
    // Allocate two large buffers on the heap for edge weights and node biases,
    // then set up the two-dimensional arrays pointing into the big buffers.
    weights_buf = (float *)aom_malloc(MAX_NODES2 * (NN_MAX_HIDDEN_LAYERS + 1) *
                                      sizeof(*weights_buf));
    bias_buf =
        (float *)aom_malloc(NN_MAX_NODES_PER_LAYER *
                            (NN_MAX_HIDDEN_LAYERS + 1) * sizeof(*bias_buf));
    ASSERT_NE(weights_buf, nullptr);
    ASSERT_NE(bias_buf, nullptr);
    for (int i = 0; i < NN_MAX_HIDDEN_LAYERS + 1; i++) {
      weights[i] = &weights_buf[i * MAX_NODES2];
      bias[i] = &bias_buf[i * NN_MAX_NODES_PER_LAYER];
    }
    target_func_ = GET_PARAM(0);
  }
  virtual void TearDown() {
    aom_free(weights_buf);
    aom_free(bias_buf);
  }
  void RunNnPredictTest(const NN_CONFIG *const shape);
  void RunNnPredictSpeedTest(const NN_CONFIG *const shape, const int run_times);
  void RunNnPredictTest_all(const NN_CONFIG *const shapes,
                            const int num_shapes);
  void RunNnPredictSpeedTest_all(const NN_CONFIG *const shapes,
                                 const int num_shapes, const int run_times);

 private:
  NnPredict_Func target_func_;
  libaom_test::ACMRandom rng_;
  float *weights[NN_MAX_HIDDEN_LAYERS + 1] = { 0 };
  float *bias[NN_MAX_HIDDEN_LAYERS + 1] = { 0 };
  float *weights_buf = nullptr, *bias_buf = nullptr;
};

void NnPredictTest::RunNnPredictTest(const NN_CONFIG *const shape) {
  libaom_test::ClearSystemState();
  float inputs[NN_MAX_NODES_PER_LAYER] = { 0 };
  float outputs_test[NN_MAX_NODES_PER_LAYER] = { 0 };
  float outputs_ref[NN_MAX_NODES_PER_LAYER] = { 0 };

  NN_CONFIG nn_config;
  memcpy(&nn_config, shape, sizeof(nn_config));

  char shape_str[32] = { 0 };
  snprintf(shape_str, sizeof(shape_str), "%d", shape->num_inputs);
  for (int layer = 0; layer < shape->num_hidden_layers; layer++)
    snprintf(&shape_str[strlen(shape_str)],
             sizeof(shape_str) - strlen(shape_str), "x%d",
             shape->num_hidden_nodes[layer]);
  snprintf(&shape_str[strlen(shape_str)], sizeof(shape_str) - strlen(shape_str),
           "x%d", shape->num_outputs);

  for (int i = 0; i < NN_MAX_HIDDEN_LAYERS + 1; i++) {
    nn_config.weights[i] = weights[i];
    nn_config.bias[i] = bias[i];
  }

  for (int iter = 0; iter < 10000 && !HasFatalFailure(); ++iter) {
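    // Fill the inputs, weights and bias with pseudo-random values in roughly
    // [-0.5, 0.5).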
    for (int node = 0; node < shape->num_inputs; node++) {
      inputs[node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
    }
    for (int layer = 0; layer < shape->num_hidden_layers; layer++) {
      for (int node = 0; node < NN_MAX_NODES_PER_LAYER; node++) {
        bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
      }
      for (int node = 0; node < NN_MAX_NODES_PER_LAYER * NN_MAX_NODES_PER_LAYER;
           node++) {
        weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
      }
    }
    // Now the output layer's bias and weights:
    int layer = shape->num_hidden_layers;
    for (int node = 0; node < NN_MAX_NODES_PER_LAYER; node++) {
      bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
    }
    for (int node = 0; node < NN_MAX_NODES_PER_LAYER * NN_MAX_NODES_PER_LAYER;
         node++) {
      weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
    }

    av1_nn_predict_c(inputs, &nn_config, 0, outputs_ref);
    target_func_(inputs, &nn_config, 0, outputs_test);
    libaom_test::ClearSystemState();

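    // Compare the optimized output against the C reference: near-zero
    // reference outputs are checked absolutely, all others by relative error.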
    for (int node = 0; node < shape->num_outputs; node++) {
      if (outputs_ref[node] < epsilon) {
        ASSERT_LE(outputs_test[node], epsilon)
            << "Reference output was near-zero, test output was not ("
            << shape_str << ")";
      } else {
        const float error = outputs_ref[node] - outputs_test[node];
        const float relative_error = fabsf(error / outputs_ref[node]);
        ASSERT_LE(relative_error, epsilon)
            << "Excessive relative error between reference and test ("
            << shape_str << ")";
      }
    }
  }
}

void NnPredictTest::RunNnPredictSpeedTest(const NN_CONFIG *const shape,
                                          const int run_times) {
  libaom_test::ClearSystemState();
  float inputs[NN_MAX_NODES_PER_LAYER] = { 0 };
  float outputs_test[NN_MAX_NODES_PER_LAYER] = { 0 };
  float outputs_ref[NN_MAX_NODES_PER_LAYER] = { 0 };

  NN_CONFIG nn_config;
  memcpy(&nn_config, shape, sizeof(nn_config));

  for (int i = 0; i < NN_MAX_HIDDEN_LAYERS + 1; i++) {
    nn_config.weights[i] = weights[i];
    nn_config.bias[i] = bias[i];
  }
  // Don't bother actually changing the values for inputs/weights/bias: it
  // shouldn't make any difference for a speed test.

  aom_usec_timer timer;
  aom_usec_timer_start(&timer);
  for (int i = 0; i < run_times; ++i) {
    av1_nn_predict_c(inputs, &nn_config, 0, outputs_ref);
  }
  aom_usec_timer_mark(&timer);
  const double time1 = static_cast<double>(aom_usec_timer_elapsed(&timer));
  aom_usec_timer_start(&timer);
  for (int i = 0; i < run_times; ++i) {
    target_func_(inputs, &nn_config, 0, outputs_test);
  }
  aom_usec_timer_mark(&timer);
  libaom_test::ClearSystemState();
  const double time2 = static_cast<double>(aom_usec_timer_elapsed(&timer));

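  // Report the shape, the total C-reference and optimized times, and the
  // speedup ratio (time1 / time2).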
  printf("%d", shape->num_inputs);
  for (int layer = 0; layer < shape->num_hidden_layers; layer++)
    printf("x%d", shape->num_hidden_nodes[layer]);
  printf("x%d: ", shape->num_outputs);
  printf("%7.2f/%7.2fus (%3.2f)\n", time1, time2, time1 / time2);
}

// These are all the neural-network shapes observed across a few different
// runs of the encoder. They also conveniently cover all the kernels
// implemented.
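// Each initializer lists num_inputs, num_outputs, num_hidden_layers and the
// per-layer hidden node counts; the weight and bias pointers are left null
// here and filled in by the tests at run time.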
static const NN_CONFIG shapes[] = {
  { 10, 16, 1, { 64 }, { 0 }, { 0 } }, { 12, 1, 1, { 12 }, { 0 }, { 0 } },
  { 12, 1, 1, { 24 }, { 0 }, { 0 } },  { 12, 1, 1, { 32 }, { 0 }, { 0 } },
  { 18, 4, 1, { 24 }, { 0 }, { 0 } },  { 18, 4, 1, { 32 }, { 0 }, { 0 } },
  { 4, 1, 1, { 16 }, { 0 }, { 0 } },   { 8, 1, 1, { 16 }, { 0 }, { 0 } },
  { 8, 4, 1, { 16 }, { 0 }, { 0 } },   { 8, 1, 1, { 24 }, { 0 }, { 0 } },
  { 8, 1, 1, { 32 }, { 0 }, { 0 } },   { 8, 1, 1, { 64 }, { 0 }, { 0 } },
  { 9, 3, 1, { 32 }, { 0 }, { 0 } },   { 4, 4, 1, { 8 }, { 0 }, { 0 } },
};

void NnPredictTest::RunNnPredictTest_all(const NN_CONFIG *const shapes,
                                         const int num_shapes) {
  for (int i = 0; i < num_shapes; i++) RunNnPredictTest(&shapes[i]);
}

void NnPredictTest::RunNnPredictSpeedTest_all(const NN_CONFIG *const shapes,
                                              const int num_shapes,
                                              const int run_times) {
  for (int i = 0; i < num_shapes; i++)
    NnPredictTest::RunNnPredictSpeedTest(&shapes[i], run_times);
}

TEST_P(NnPredictTest, RandomValues) {
  RunNnPredictTest_all(shapes, sizeof(shapes) / sizeof(*shapes));
}

TEST_P(NnPredictTest, DISABLED_Speed) {
  RunNnPredictSpeedTest_all(shapes, sizeof(shapes) / sizeof(*shapes), 10000000);
}

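// Instantiate the tests for each optimized implementation available in this
// build.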
#if HAVE_SSE3
INSTANTIATE_TEST_SUITE_P(SSE3, NnPredictTest,
                         ::testing::Values(av1_nn_predict_sse3));
#endif

}  // namespace