/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <cmath>
#include <functional>
#include <string>
#include <tuple>
#include <vector>

#include "TestNeuralNetworksWrapper.h"

using namespace android::nn::test_wrapper;

namespace {

const uint32_t INTENDED_SIZE = 3;
const uint32_t OTHER_SIZE = 2;
const uint32_t UNKNOWN_SIZE = 0;

// We test four basic scenarios for each tensor dimension:
//     INTENDED_AT_COMPILE_AND_EXECUTE: set the dimension at compile
//     (addOperand) time to INTENDED_SIZE, use the same size at execution
//     (setInput/setOutput) time. This should always work.
//
//     INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE: set the dimension at compile
//     (addOperand) time to INTENDED_SIZE, give no size at execution time.
//     This should always work.
//
//     UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE: don't set the dimension at
//     compile (addOperand) time, use INTENDED_SIZE at execute
//     (setInput/setOutput) time. Note that for constants this just means using
//     an unknown dimension at addOperand, as there is no type parameter to
//     setOperandValue. This should work for inputs and outputs, and give an
//     error for constants at compile time.
//
//     UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE: don't set the dimension at compile
//     (addOperand) time, use OTHER_SIZE at execute (setInput/setOutput) time.
//     This should give an error at execute time (as the constant value will
//     have a different size).
//
// All relevant combinations of the basic scenarios are then iterated over in
// TestAll. Note that we don't want to just use googletest's parameterized tests
// (TEST_P), as the 16k combinations generated too many lines of output for the
// test infrastructure to handle correctly. However, running all 16k in one test
// makes the ASAN version take so long that the automatic test runner thinks the
// command has become unresponsive, so we split on the first level.
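//
// As a rough sketch of what "unknown at compile time" means in terms of the
// wrapper API used below (the types and calls mirror the ones in TestOne;
// nothing here is new): a dimension left as UNKNOWN_SIZE (0) at addOperand
// time is only pinned down via the optional type argument of
// setInput/setOutput, e.g.
//
//     OperandType partial(Type::TENSOR_FLOAT32, {UNKNOWN_SIZE, INTENDED_SIZE});
//     auto opd = model.addOperand(&partial);   // first dimension unspecified
//     ...
//     OperandType full(Type::TENSOR_FLOAT32, {INTENDED_SIZE, INTENDED_SIZE});
//     execution.setInput(0, buffer, sizeof(buffer), &full.operandType);  // now fully specified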
enum class DimensionKind {
    INTENDED_AT_COMPILE_AND_EXECUTE,
    INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE
};
typedef std::tuple<DimensionKind, DimensionKind> OperandParams;
std::vector<DimensionKind> ioDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE,
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE};
std::vector<DimensionKind> constantDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE};
std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds);
auto ioValues = Combine(ioDimensionValues, ioDimensionValues);
auto constantValues = Combine(constantDimensionValues, constantDimensionValues);
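// Each combination is additionally run with both the synchronous and the fenced
// compute path (Execution::ComputeMode); see the computeMode loop in TestAll.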
std::vector<Execution::ComputeMode> computeModes = {
        Execution::ComputeMode::SYNC,
        Execution::ComputeMode::FENCED};

class UnknownDimensionsTest : public ::testing::TestWithParam<OperandParams> {
   protected:
    template <class T, Type TensorType>
    void TestOne(const OperandParams& paramsForInput0, const OperandParams& paramsForInput1,
                 const OperandParams& paramsForConst, const OperandParams& paramsForOutput,
                 Execution::ComputeMode computeMode);
    template <class T, Type TensorType>
    void TestAll();

    template <typename T>
    void CompareResults(const std::vector<T>& expected, const std::vector<T>& actual);
};

template <typename T>
void CompareGeneric(const std::vector<T>& golden, const std::vector<T>& test,
                    std::function<void(T, T)> cmp) {
    ASSERT_EQ(golden.size(), test.size());
    for (uint32_t i = 0; i < golden.size(); i++) {
        SCOPED_TRACE(testing::Message() << "When comparing element " << i);
        cmp(golden[i], test[i]);
    }
}

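// Cap on how many individual EXPECT_NEAR failures each comparison reports, so a
// badly broken run does not flood the logs; mismatches beyond the cap are still
// counted and fail the final EXPECT_EQ in each CompareResults specialization.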
constexpr size_t gMaximumNumberOfErrorMessages = 10;

template <>
void UnknownDimensionsTest::CompareResults<float>(const std::vector<float>& golden,
                                                  const std::vector<float>& test) {
    size_t totalNumberOfErrors = 0;
    float fpAtol = 1e-5f, fpRtol = 1e-5f;
    CompareGeneric<float>(golden, test,
                          [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
                              // Compute the range based on both absolute tolerance and relative
                              // tolerance
                              float fpRange = fpAtol + fpRtol * std::abs(expected);
                              if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                                  EXPECT_NEAR(expected, actual, fpRange);
                              }
                              if (std::abs(expected - actual) > fpRange) {
                                  totalNumberOfErrors++;
                              }
                          });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <>
void UnknownDimensionsTest::CompareResults<uint8_t>(const std::vector<uint8_t>& golden,
                                                    const std::vector<uint8_t>& test) {
    size_t totalNumberOfErrors = 0;
    CompareGeneric<uint8_t>(golden, test, [&totalNumberOfErrors](uint8_t expected, uint8_t actual) {
        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
            EXPECT_NEAR(expected, actual, 1);
        }
        if (std::abs(expected - actual) > 1) {
            totalNumberOfErrors++;
        }
    });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <>
void UnknownDimensionsTest::CompareResults<_Float16>(const std::vector<_Float16>& golden,
                                                     const std::vector<_Float16>& test) {
    size_t totalNumberOfErrors = 0;
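    // 0.0009765625f is 2^-10, the spacing of fp16 values in [1, 2), so the
    // tolerances below allow roughly 5 ULP of absolute error plus 5 ULP of
    // relative error scaled by the magnitude of the expected value.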
    float fpAtol = 5.0f * 0.0009765625f, fpRtol = 5.0f * 0.0009765625f;
    CompareGeneric<_Float16>(
            golden, test,
            [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
                // Compute the range based on both absolute tolerance and relative
                // tolerance
                float fpRange = fpAtol + fpRtol * std::abs(static_cast<float>(expected));
                if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                    EXPECT_NEAR(expected, actual, fpRange);
                }
                if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
                    totalNumberOfErrors++;
                }
            });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestOne(const OperandParams& paramsForInput0,
                                    const OperandParams& paramsForInput1,
                                    const OperandParams& paramsForConst,
                                    const OperandParams& paramsForOutput,
                                    Execution::ComputeMode computeMode) {
    typedef T IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE];
    static const IntendedMatrix ones = {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}};
    static const IntendedMatrix twos = {{2, 2, 2}, {2, 2, 2}, {2, 2, 2}};
    static const IntendedMatrix fives = {{5, 5, 5}, {5, 5, 5}, {5, 5, 5}};
    const float scale = TensorType == Type::TENSOR_QUANT8_ASYMM ? 1.f : 0.f;

    Model model;
    std::string input0Scope("Input 0:"), input1Scope("Input 1:"), constantScope("Constant:"),
            outputScope("Output:");

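    // Maps a DimensionKind to the extent used at compile (addOperand) time:
    // the INTENDED_* kinds yield INTENDED_SIZE, the UNKNOWN_* kinds yield
    // UNKNOWN_SIZE (0), i.e. an unspecified dimension. The kind's name is also
    // appended to the given scope string used in the failure traces below.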
    auto getDimForCompile = [](DimensionKind kind, std::string* scope) {
        switch (kind) {
            case DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_AND_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE");
                return UNKNOWN_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE");
                return UNKNOWN_SIZE;
        }
    };
    auto addOperand = [&model, &getDimForCompile, scale](OperandParams params,
                                                         std::string* scope = nullptr) {
        OperandType matrixTypeWithPotentiallyUnknownDims(
                TensorType,
                {getDimForCompile(std::get<0>(params), scope),
                 getDimForCompile(std::get<1>(params), scope)},
                scale);
        return model.addOperand(&matrixTypeWithPotentiallyUnknownDims);
    };
    auto inputOpd0 = addOperand(paramsForInput0, &input0Scope);
    auto inputOpd1 = addOperand(paramsForInput1, &input1Scope);
    auto intermediateOpd0 = addOperand(OperandParams{
            // Dimensions for intermediate operand actually deduced at execution time
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE});
    auto constantOpd0 = addOperand(paramsForConst, &constantScope);
    auto outputOpd0 = addOperand(paramsForOutput, &outputScope);

    // Make the gtest failure easier to read
    SCOPED_TRACE(input0Scope);
    SCOPED_TRACE(input1Scope);
    SCOPED_TRACE(constantScope);
    SCOPED_TRACE(outputScope);

    OperandType scalarType(Type::INT32, {});
    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
    auto activationOpd0 = model.addOperand(&scalarType);

    model.setOperandValue(activationOpd0, &activation, sizeof(activation));
    model.setOperandValue(constantOpd0, twos, sizeof(twos));
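    // The graph computes output = (input0 + input1) + constant. With inputs of
    // all ones and all twos and a constant of all twos, the expected result is
    // all fives (the "fives" golden matrix above).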
    model.addOperation(ANEURALNETWORKS_ADD, {inputOpd0, inputOpd1, activationOpd0},
                       {intermediateOpd0});
    model.addOperation(ANEURALNETWORKS_ADD, {intermediateOpd0, constantOpd0, activationOpd0},
                       {outputOpd0});
    model.identifyInputsAndOutputs({inputOpd0, inputOpd1}, {outputOpd0});
    if (std::get<0>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
        std::get<1>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);
    } else {
        ASSERT_FALSE(model.isValid());
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(model.finish(), Result::NO_ERROR);
        return;
    }

    Compilation compilation(&model);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

    IntendedMatrix actual = {{10, 10, 10}, {10, 10, 10}, {10, 10, 10}};
    Execution execution(&compilation);

    OperandType matrixTypeIntended(TensorType, {INTENDED_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeFirstOther(TensorType, {OTHER_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeSecondOther(TensorType, {INTENDED_SIZE, OTHER_SIZE}, scale);
    OperandType matrixTypeBothOther(TensorType, {OTHER_SIZE, OTHER_SIZE}, scale);
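    // These fully specified types are passed as the optional "type" argument of
    // setInput/setOutput below, either to supply dimensions that were left
    // unknown at compile time or, for the *_OTHER_AT_EXECUTE cases, to supply
    // deliberately mismatching dimensions.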
    bool allAreIntendedSizeAtExecution = true;

    // Helper to return appropriate "type" parameter to setInput/setOutput based
    // on OperandParams
    auto typeAtSet = [&](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE &&
            second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeBothOther.operandType;
        } else if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeFirstOther.operandType;
        } else if (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeSecondOther.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE) {
            return &matrixTypeIntended.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
            return static_cast<ANeuralNetworksOperandType*>(nullptr);
        } else {
            return &matrixTypeIntended.operandType;
        }
    };
    // Helper to return appropriate "size" parameter to setInput/setOutput based
    // on OperandParams
    auto sizeAtSet = [](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        size_t firstDim = (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                  ? OTHER_SIZE
                                  : INTENDED_SIZE;
        size_t secondDim = (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                   ? OTHER_SIZE
                                   : INTENDED_SIZE;
        return firstDim * secondDim * sizeof(fives[0][0]);
    };
    ASSERT_EQ(execution.setInput(0, ones, sizeAtSet(paramsForInput0), typeAtSet(paramsForInput0)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, twos, sizeAtSet(paramsForInput1), typeAtSet(paramsForInput1)),
              Result::NO_ERROR);
    ASSERT_EQ(
            execution.setOutput(0, actual, sizeAtSet(paramsForOutput), typeAtSet(paramsForOutput)),
            Result::NO_ERROR);

    if (allAreIntendedSizeAtExecution) {
        ASSERT_EQ(execution.compute(computeMode), Result::NO_ERROR);
    } else {
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(execution.compute(), Result::NO_ERROR);
        return;
    }

    constexpr size_t count = sizeof(fives) / sizeof(fives[0][0]);
    std::vector<T> expected_opds(&fives[0][0], &fives[0][0] + count);
    std::vector<T> actual_opds(&actual[0][0], &actual[0][0] + count);
    CompareResults(expected_opds, actual_opds);
}

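// Returns the Cartesian product of the two DimensionKind lists, i.e. every
// (first dimension, second dimension) pairing.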
std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds) {
    std::vector<OperandParams> ret;
    for (auto first : firsts) {
        for (auto second : seconds) {
            ret.push_back({first, second});
        }
    }
    return ret;
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestAll() {
    const OperandParams paramsForInput0 = GetParam();
    for (auto paramsForInput1 : ioValues) {
        for (auto paramsForConst : constantValues) {
            for (auto paramsForOutput : ioValues) {
                for (auto computeMode : computeModes) {
                    TestOne<T, TensorType>(paramsForInput0, paramsForInput1, paramsForConst,
                                           paramsForOutput, computeMode);
                }
            }
        }
    }
}

TEST_P(UnknownDimensionsTest, Float) {
    TestAll<float, Type::TENSOR_FLOAT32>();
}

TEST_P(UnknownDimensionsTest, Quantized) {
    TestAll<uint8_t, Type::TENSOR_QUANT8_ASYMM>();
}

TEST_P(UnknownDimensionsTest, Float16) {
    TestAll<_Float16, Type::TENSOR_FLOAT16>();
}

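// One gtest case is instantiated per choice of dimension kinds for input 0 (the
// "first level" split described at the top of this file); the other operands and
// the compute mode are iterated inside TestAll.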
INSTANTIATE_TEST_SUITE_P(UnknownCombinationsTest, UnknownDimensionsTest,
                         ::testing::ValuesIn(ioValues));
}  // end namespace