/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TestGenerated.h"
#include "TestHarness.h"

#include <gtest/gtest.h>

#include <ftw.h>
#include <unistd.h>
#include <algorithm>  // for std::for_each
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <thread>

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
// the case for CTS (public APIs only).
#ifndef NNTEST_ONLY_PUBLIC_API
#include "Tracing.h"
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

namespace generated_tests {
using namespace android::nn::test_wrapper;
using namespace test_helper;

namespace {
template <typename T>
void print(std::ostream& os, const std::map<int, std::vector<T>>& test) {
    // Dump all T-typed operands held in the map.
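    // Note: the unary plus below promotes narrow integer types (e.g. uint8_t) to int,
    // so element values are printed as numbers rather than as characters.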
    for_each<T>(test, [&os](int idx, const std::vector<T>& f) {
        os << " aliased_output" << idx << ": [";
        for (size_t i = 0; i < f.size(); ++i) {
            os << (i == 0 ? "" : ", ") << +f[i];
        }
        os << "],\n";
    });
}

// Specialized for _Float16 because it requires explicit conversion.
template <>
void print<_Float16>(std::ostream& os, const std::map<int, std::vector<_Float16>>& test) {
    for_each<_Float16>(test, [&os](int idx, const std::vector<_Float16>& f) {
        os << " aliased_output" << idx << ": [";
        for (size_t i = 0; i < f.size(); ++i) {
            os << (i == 0 ? "" : ", ") << +static_cast<float>(f[i]);
        }
        os << "],\n";
    });
}

void printAll(std::ostream& os, const MixedTyped& test) {
    print(os, test.float32Operands);
    print(os, test.int32Operands);
    print(os, test.quant8AsymmOperands);
    print(os, test.quant16SymmOperands);
    print(os, test.float16Operands);
    print(os, test.bool8Operands);
    print(os, test.quant8ChannelOperands);
    print(os, test.quant16AsymmOperands);
    print(os, test.quant8SymmOperands);
    static_assert(9 == MixedTyped::kNumTypes,
                  "Number of types in MixedTyped changed, but printAll function wasn't updated");
}
}  // namespace

Compilation GeneratedTests::compileModel(const Model* model) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching will be
        // exercised if supported by the driver.
        Compilation compilation1(model);
        compilation1.setCaching(mCacheDir, mToken);
        compilation1.finish();
        Compilation compilation2(model);
        compilation2.setCaching(mCacheDir, mToken);
        compilation2.finish();
        return compilation2;
    } else {
        Compilation compilation(model);
        compilation.finish();
        return compilation;
    }
}

void GeneratedTests::executeWithCompilation(const Model* model, Compilation* compilation,
                                            std::function<bool(int)> isIgnored,
                                            std::vector<MixedTypedExample>& examples,
                                            std::string dumpFile) {
    bool dumpToFile = !dumpFile.empty();
    std::ofstream s;
    if (dumpToFile) {
        s.open(dumpFile, std::ofstream::trunc);
        ASSERT_TRUE(s.is_open());
    }

    int exampleNo = 0;
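    // Default tolerances for float32 results: absolute 1e-5 and relative 5 ULP,
    // where FLT_EPSILON = 2^-23 = 1.1920928955078125e-7.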
    float fpAtol = 1e-5f;
    float fpRtol = 5.0f * 1.1920928955078125e-7f;
    for (auto& example : examples) {
        NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");
        SCOPED_TRACE(exampleNo);
        // TODO: We keep the inputs as a copy here; verify whether the test
        // modifies them later.
        MixedTyped inputs = example.operands.first;
        const MixedTyped& golden = example.operands.second;

        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
        if (model->isRelaxed() || hasFloat16Inputs) {
            // TODO: Adjust the error limit based on testing.
            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
            fpAtol = 5.0f * 0.0009765625f;
            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
            fpRtol = 5.0f * 0.0009765625f;
        }

        Execution execution(compilation);
        MixedTyped test;
        {
            NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "executeWithCompilation example");
            // Set all inputs
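            // A zero-length operand is passed as a null buffer, which the runtime
            // treats as an omitted (optional) operand.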
            for_all(inputs, [&execution](int idx, const void* p, size_t s) {
                const void* buffer = s == 0 ? nullptr : p;
                ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, buffer, s));
            });

            // Go through all typed outputs
            resize_accordingly(golden, test);
            for_all(test, [&execution](int idx, void* p, size_t s) {
                void* buffer = s == 0 ? nullptr : p;
                ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, buffer, s));
            });
        }

        Result r = execution.compute();
        ASSERT_EQ(Result::NO_ERROR, r);
        {
            NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");

            // Get output dimensions
            for_each<uint32_t>(
                    test.operandDimensions, [&execution](int idx, std::vector<uint32_t>& t) {
                        ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(idx, &t));
                    });

            // Dump all outputs for the slicing tool
            if (dumpToFile) {
                s << "output" << exampleNo << " = {\n";
                printAll(s, test);
                // all outputs are done
                s << "}\n";
            }

            // Filter out don't cares
            MixedTyped filteredGolden = filter(golden, isIgnored);
            MixedTyped filteredTest = filter(test, isIgnored);
            // We want "close-enough" results for float
            compare(filteredGolden, filteredTest, fpAtol, fpRtol);
        }
        exampleNo++;

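        // Examples with a nonzero expectedMultinomialDistributionTolerance come from
        // inherently random operations; their output is checked statistically against
        // the expected distribution rather than element by element.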
        if (example.expectedMultinomialDistributionTolerance > 0) {
            expectMultinomialDistributionWithinTolerance(test, example);
        }
    }
}

void GeneratedTests::executeOnce(const Model* model, std::function<bool(int)> isIgnored,
                                 std::vector<MixedTypedExample>& examples, std::string dumpFile) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    Compilation compilation = compileModel(model);
    executeWithCompilation(model, &compilation, isIgnored, examples, dumpFile);
}

void GeneratedTests::executeMultithreadedOwnCompilation(const Model* model,
                                                        std::function<bool(int)> isIgnored,
                                                        std::vector<MixedTypedExample>& examples) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, isIgnored, examples, ""); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

void GeneratedTests::executeMultithreadedSharedCompilation(
        const Model* model, std::function<bool(int)> isIgnored,
        std::vector<MixedTypedExample>& examples) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    Compilation compilation = compileModel(model);
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread(
                [&]() { executeWithCompilation(model, &compilation, isIgnored, examples, ""); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

// Test driver for those generated from ml/nn/runtime/test/spec
void GeneratedTests::execute(std::function<void(Model*)> createModel,
                             std::function<bool(int)> isIgnored,
                             std::vector<MixedTypedExample>& examples,
                             [[maybe_unused]] std::string dumpFile) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    Model model;
    createModel(&model);
    model.finish();
    auto executeInternal = [&model, &isIgnored, &examples,
                            this]([[maybe_unused]] std::string dumpFile) {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
#ifndef NNTEST_MULTITHREADED
        executeOnce(&model, isIgnored, examples, dumpFile);
#else   // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(&model, isIgnored, examples);
        executeMultithreadedSharedCompilation(&model, isIgnored, examples);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
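    // Run the whole example set twice: first without compilation caching, then with it.
    // The output dump (if requested) is only written on the first, uncached pass.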
    mTestCompilationCaching = false;
    executeInternal(dumpFile);
    mTestCompilationCaching = true;
    executeInternal("");
}

void GeneratedTests::SetUp() {
#ifdef NNTEST_COMPUTE_MODE
    mOldComputeMode = Execution::setComputeMode(GetParam());
#endif
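    // Create a fresh, per-test cache directory under /data/local/tmp and use an
    // all-zero caching token; compileModel() passes both to setCaching() when
    // compilation caching is exercised.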
    char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

void GeneratedTests::TearDown() {
#ifdef NNTEST_COMPUTE_MODE
    Execution::setComputeMode(mOldComputeMode);
#endif
    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Recursively remove the cache directory (mCacheDir) and everything in it.
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
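        // FTW_DEPTH performs a post-order walk (entries before their directory), so
        // remove() only ever sees empty directories; FTW_PHYS does not follow symlinks
        // and FTW_MOUNT stays within the same filesystem. 128 caps the number of file
        // descriptors nftw() may keep open.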
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
}

#ifdef NNTEST_COMPUTE_MODE
INSTANTIATE_TEST_SUITE_P(ComputeMode, GeneratedTests,
                         testing::Values(Execution::ComputeMode::SYNC,
                                         Execution::ComputeMode::ASYNC,
                                         Execution::ComputeMode::BURST));
#endif

}  // namespace generated_tests