/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <fcntl.h>     // open()
#include <ftw.h>
#include <gtest/gtest.h>
#include <sys/stat.h>  // S_IRUSR, S_IWUSR
#include <unistd.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "GeneratedTestUtils.h"
#include "SupportLibraryTestUtils.h"
#include "SupportLibraryWrapper.h"

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, since CTS is
// likewise restricted to public APIs.
#ifndef NNTEST_ONLY_PUBLIC_API
#include <Tracing.h>
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

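// Name of the NNAPI Support Library (.so) under test; defined outside this file.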
extern std::string SUPPORT_LIBRARY_NAME;

namespace android::nn::generated_tests {
using namespace sl_wrapper;
using namespace test_helper;

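// Base fixture for the generated Support Library tests. The mTest* flags below
// select a test variant; the tagging subclasses defined further down each
// enable one variant in their constructor.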
class GeneratedTests : public GeneratedTestBase {
   protected:
    void SetUp() override;
    void TearDown() override;

    bool shouldSkipTest();

    ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation,
                                                      uint32_t index);
    ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
                                                       uint32_t index);
    void computeWithDeviceMemories(const Compilation& compilation, const TestModel& testModel,
                                   Execution* execution, Execution::ComputeMode computeMode,
                                   Result* result, std::vector<TestBuffer>* outputs);
    bool checkSupported(const Model& model, ANeuralNetworksDevice* device);
    std::optional<Compilation> compileModel(const Model& model, ANeuralNetworksDevice* device);
    void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
    void executeOnce(const Model& model, const TestModel& testModel);
    void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel);
    void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel);
    // Test driver for the tests generated from ml/nn/runtime/test/spec.
    void execute(const TestModel& testModel);

    // VNDK version of the device under test.
    static int mVndkVersion;

    std::string mCacheDir;
    std::vector<uint8_t> mToken;
    bool mTestCompilationCaching = false;
    bool mTestDynamicOutputShape = false;
    bool mExpectFailure = false;
    bool mTestQuantizationCoupling = false;
    bool mTestDeviceMemory = false;
    Execution::ComputeMode mComputeMode = Execution::getComputeMode();

    std::unique_ptr<const NnApiSupportLibrary> mNnApi =
            loadNnApiSupportLibrary(SUPPORT_LIBRARY_NAME);
};

int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__;

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {
   protected:
    DynamicOutputShapeTest() { mTestDynamicOutputShape = true; }
};

// Tag for the fenced execute tests
class FencedComputeTest : public GeneratedTests {};

// Tag for the generated validation tests
class GeneratedValidationTests : public GeneratedTests {
   protected:
    GeneratedValidationTests() { mExpectFailure = true; }
};

class QuantizationCouplingTest : public GeneratedTests {
   protected:
    QuantizationCouplingTest() { mTestQuantizationCoupling = true; }
};

class DeviceMemoryTest : public GeneratedTests {
   protected:
    DeviceMemoryTest() { mTestDeviceMemory = true; }
};

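// Returns true if every operation in |model| is supported by |device|.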
bool GeneratedTests::checkSupported(const Model& model, ANeuralNetworksDevice* device) {
    constexpr static int MAX_NUM_OPS = 256;
    std::array<bool, MAX_NUM_OPS> supportedOps;
    supportedOps.fill(true);
    EXPECT_EQ(mNnApi->ANeuralNetworksModel_getSupportedOperationsForDevices(
                      model.getHandle(), &device, /*numDevices=*/1, supportedOps.data()),
              ANEURALNETWORKS_NO_ERROR);
    const bool fullySupportedModel =
            std::all_of(supportedOps.begin(), supportedOps.end(), [](bool v) { return v; });
    return fullySupportedModel;
}

static std::vector<base::unique_fd> createCacheFds(const std::vector<std::string>& files) {
    std::vector<base::unique_fd> fds;
    fds.reserve(files.size());
    for (const auto& file : files) {
        auto fd = base::unique_fd(open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR));
        if (fd.get() == -1) {
            // FAIL() may only be used in functions returning void, so wrap it in
            // an immediately-invoked lambda.
            [] { FAIL(); }();
            return {};
        }
        fds.push_back(std::move(fd));
    }
    return fds;
}

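// Compiles |model| for |device|. When mTestCompilationCaching is set, the model is
// compiled twice with the same cache token so that the second compilation can be
// served from the driver's cache, if the driver supports caching.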
std::optional<Compilation> GeneratedTests::compileModel(const Model& model,
                                                        ANeuralNetworksDevice* device) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");

    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching is
        // exercised if the driver supports it. Invalid models never reach this branch.
        EXPECT_FALSE(mExpectFailure);

        std::string mode = ::android::base::GetProperty("debug.nn.slts.caching", "random");
        bool useSetCachingFromFds;
        if (mode == "path") {
            useSetCachingFromFds = false;
        } else if (mode == "fds") {
            useSetCachingFromFds = true;
        } else if (mode == "random") {
            std::string testName = ::testing::UnitTest::GetInstance()->current_test_info()->name();
            std::seed_seq seq(testName.begin(), testName.end());
            std::mt19937 gen(seq);
            std::bernoulli_distribution d(0.5);
            useSetCachingFromFds = d(gen);
        } else {
            // As above, FAIL() must run in a void-returning context.
            [&mode] {
                FAIL() << "System property debug.nn.slts.caching should be one of \"path\", "
                          "\"fds\", or \"random\"; got \""
                       << mode << "\"";
            }();
            return {};
        }
        SCOPED_TRACE("Use setCachingFromFds = " + std::to_string(useSetCachingFromFds) + " (" +
                     mode + ")");
        std::cout << "\nUse setCachingFromFds = " << std::boolalpha << useSetCachingFromFds << " ("
                  << mode << ")" << std::endl;

        std::vector<std::string> modelCacheFilenames, dataCacheFilenames;
        if (useSetCachingFromFds) {
            uint32_t numModelCacheFiles, numDataCacheFiles;
            EXPECT_EQ(mNnApi->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(
                              device, &numModelCacheFiles, &numDataCacheFiles),
                      ANEURALNETWORKS_NO_ERROR);
            for (uint32_t i = 0; i < numModelCacheFiles; i++) {
                modelCacheFilenames.push_back({mCacheDir + "/model" + std::to_string(i)});
            }
            for (uint32_t i = 0; i < numDataCacheFiles; i++) {
                dataCacheFilenames.push_back({mCacheDir + "/data" + std::to_string(i)});
            }
        }

        auto resultCompilation1 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation1.first, Result::NO_ERROR);
        auto compilation1 = std::move(resultCompilation1.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation1.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);

        auto resultCompilation2 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation2.first, Result::NO_ERROR);
        auto compilation2 = std::move(resultCompilation2.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation2.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);

        return compilation2;
    } else {
        auto resultCompilation = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation.first, Result::NO_ERROR);
        auto compilation = std::move(resultCompilation.second);
        Result result = compilation.finish();

        // For a valid model, the compilation result must be NO_ERROR.
        // For an invalid model, the driver may fail at either compilation or execution,
        // so any result code is permitted at this point.
        if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
        EXPECT_EQ(result, Result::NO_ERROR);
        return compilation;
    }
}

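// Runs the execution with plain host-pointer input/output buffers (in contrast
// to computeWithDeviceMemories below).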
void computeWithPtrs(const TestModel& testModel, Execution* execution,
                     Execution::ComputeMode computeMode, Result* result,
                     std::vector<TestBuffer>* outputs) {
    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithPtrs example");
        createRequest(testModel, execution, outputs);
    }
    *result = execution->compute(computeMode);
}

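// Creates a device memory for the input at |index| of |compilation|.
// Returns nullptr on failure; callers are expected to check.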
ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForInput(const Compilation& compilation,
                                                                  uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_addInputRole(desc, compilation.getHandle(), index,
                                                             1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

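// Creates a device memory for the output at |index| of |compilation|.
// Returns nullptr on failure; callers are expected to check.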
ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForOutput(const Compilation& compilation,
                                                                   uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation.getHandle(), index,
                                                              1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

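// Runs the execution with all inputs and outputs bound to device memories created
// from memory descriptors, staging data in and out through ashmem regions.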
// Set result = Result::NO_ERROR and outputs = {} if the test should be skipped.
void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation,
                                               const TestModel& testModel, Execution* execution,
                                               Execution::ComputeMode computeMode, Result* result,
                                               std::vector<TestBuffer>* outputs) {
    ASSERT_NE(execution, nullptr);
    ASSERT_NE(result, nullptr);
    ASSERT_NE(outputs, nullptr);
    outputs->clear();
    std::vector<Memory> inputMemories, outputMemories;

    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example");
        // Model inputs.
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            SCOPED_TRACE("Input index: " + std::to_string(i));
            const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
            // Omitted input.
            if (operand.data.size() == 0) {
                ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
                continue;
            }

            // Create device memory.
            ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
            ASSERT_NE(memory, nullptr);
            auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory));

            // Copy data from TestBuffer to device memory.
            auto ashmem = TestAshmem::createFrom(mNnApi.get(), operand.data);
            ASSERT_NE(ashmem, nullptr);
            ASSERT_EQ(mNnApi->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
                      ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
        }

        // Model outputs.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
            ASSERT_NE(memory, nullptr);
            auto& wrapperMemory = outputMemories.emplace_back(Memory(mNnApi.get(), memory));
            ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
        }
    }

    *result = execution->compute(computeMode);

    // Copy out output results.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
        const size_t bufferSize = operand.data.size();
        auto& output = outputs->emplace_back(bufferSize);

        auto ashmem = TestAshmem::createFrom(mNnApi.get(), output);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(mNnApi->ANeuralNetworksMemory_copy(outputMemories[i].get(), ashmem->get()->get()),
                  ANEURALNETWORKS_NO_ERROR);
        std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
                  output.getMutable<uint8_t>());
    }
}

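// Creates an execution from |compilation|, runs it, and checks the output shapes
// and values against the expectations in |testModel|.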
void GeneratedTests::executeWithCompilation(const Compilation& compilation,
                                            const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");

    Execution execution(mNnApi.get(), &compilation);
    Result result;
    std::vector<TestBuffer> outputs;

    if (mTestDeviceMemory) {
        computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result,
                                  &outputs);
    } else {
        computeWithPtrs(testModel, &execution, mComputeMode, &result, &outputs);
    }

    // NO_ERROR with no outputs is the "skip this test" signal from the compute
    // helpers above.
    if (result == Result::NO_ERROR && outputs.empty()) {
        return;
    }

    {
        NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
        if (mExpectFailure) {
            ASSERT_NE(result, Result::NO_ERROR);
            return;
        } else {
            ASSERT_EQ(result, Result::NO_ERROR);
        }

        // Check output dimensions.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
            if (output.isIgnored) continue;
            std::vector<uint32_t> actualDimensions;
            ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions));
            ASSERT_EQ(output.dimensions, actualDimensions);
        }

        checkResults(testModel, outputs);
    }
}

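// Compiles and executes |model| once on every device that fully supports it.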
void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    uint32_t numDevices = 0;
    mNnApi->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        executeWithCompilation(compilation.value(), testModel);
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

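// Runs executeOnce on 10 threads, each thread creating its own compilation.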
void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model,
                                                        const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::cout << "\nMultithreadedOwnCompilation" << std::endl;
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

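// Compiles the model once per supporting device and runs the execution on 10
// threads that share that single compilation.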
void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
                                                           const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    std::cout << "\nMultithreadedSharedCompilation" << std::endl;
    uint32_t numDevices = 0;
    mNnApi->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        std::vector<std::thread> threads;
        for (int i = 0; i < 10; i++) {
            threads.push_back(
                    std::thread([&]() { executeWithCompilation(compilation.value(), testModel); }));
        }
        std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

// Test driver for the tests generated from ml/nn/runtime/test/spec.
void GeneratedTests::execute(const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    GeneratedModel model(mNnApi.get());
    createModel(mNnApi.get(), testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());
    auto executeInternal = [&testModel, &model, this]() {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
        std::cout << "\nCompilationCaching = " << std::boolalpha << mTestCompilationCaching
                  << std::endl;
#ifndef NNTEST_MULTITHREADED
        executeOnce(model, testModel);
#else   // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(model, testModel);
        executeMultithreadedSharedCompilation(model, testModel);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
    mTestCompilationCaching = false;
    executeInternal();
    if (!mExpectFailure) {
        mTestCompilationCaching = true;
        executeInternal();
    }
}

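// Returns true if this test must be skipped on the device's VNDK version.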
bool GeneratedTests::shouldSkipTest() {
    // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}.
    // The listed tests were added in a later release but exercise old APIs. They should be
    // skipped if the device has a mixed build of system and vendor partitions.
    static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = {
            {
                    __ANDROID_API_R__,
                    {
                            "add_broadcast_quant8_all_inputs_as_internal",
                    },
            },
    };
    for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) {
        if (mVndkVersion < minVersion && names.count(kTestName) > 0) {
            return true;
        }
    }
    return false;
}

void GeneratedTests::SetUp() {
    GeneratedTestBase::SetUp();

    mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
    if (shouldSkipTest()) {
        GTEST_SKIP();
        return;
    }

    char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

void GeneratedTests::TearDown() {
    mNnApi.reset(nullptr);

    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Remove the cache directory specified by path recursively.
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
    GeneratedTestBase::TearDown();
}

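// When NNTEST_COMPUTE_MODE is defined, each generated test runs once per compute
// mode; otherwise a single test runs with the default compute mode.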
#ifdef NNTEST_COMPUTE_MODE
TEST_P(GeneratedTests, Sync) {
    std::cout << "\nComputeMode = SYNC" << std::endl;
    mComputeMode = Execution::ComputeMode::SYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Burst) {
    std::cout << "\nComputeMode = BURST" << std::endl;
    mComputeMode = Execution::ComputeMode::BURST;
    execute(testModel);
}
#else
TEST_P(GeneratedTests, Test) {
    execute(testModel);
}
#endif

TEST_P(DynamicOutputShapeTest, Test) {
    execute(testModel);
}

TEST_P(GeneratedValidationTests, Test) {
    execute(testModel);
}

TEST_P(QuantizationCouplingTest, Test) {
    execute(convertQuant8AsymmOperandsToSigned(testModel));
}

TEST_P(DeviceMemoryTest, Test) {
    execute(testModel);
}

TEST_P(FencedComputeTest, Test) {
    mComputeMode = Execution::ComputeMode::FENCED;
    execute(testModel);
}

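// Instantiate each fixture only with the generated models it can meaningfully run.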
INSTANTIATE_GENERATED_TEST(GeneratedTests,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) {
    return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest();
});

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.main.operations.size() == 1 &&
           testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands();
});

INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

}  // namespace android::nn::generated_tests