/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <fcntl.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <libgen.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "GeneratedTestUtils.h"
#include "SupportLibraryTestUtils.h"
#include "SupportLibraryWrapper.h"
#include "TmpDirectoryUtils.h"

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
// the case for CTS (public APIs only).
#ifndef NNTEST_ONLY_PUBLIC_API
#include <Tracing.h>
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

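// Environment variable consulted by the Qualcomm (Hexagon) DSP loader when it
// resolves DSP libraries; SetUp() points it at the support library's directory.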
const char* kQCDspLoadPathEnv = "ADSP_LIBRARY_PATH";

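// Path to the NNAPI Support Library (.so) under test; defined elsewhere by the
// test harness.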
extern std::string SUPPORT_LIBRARY_NAME;

namespace android::nn::generated_tests {
using namespace sl_wrapper;
using namespace test_helper;

class GeneratedTests : public GeneratedTestBase {
  protected:
    void SetUp() override;
    void TearDown() override;

    bool shouldSkipTest();

    ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation,
                                                      uint32_t index);
    ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
                                                       uint32_t index);
    void computeWithDeviceMemories(const Compilation& compilation, const TestModel& testModel,
                                   Execution* execution, Execution::ComputeMode computeMode,
                                   Result* result, std::vector<TestBuffer>* outputs);
    bool checkSupported(const Model& model, ANeuralNetworksDevice* device);
    std::optional<Compilation> compileModel(const Model& model, ANeuralNetworksDevice* device);
    void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
    void executeOnce(const Model& model, const TestModel& testModel);
    void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel);
    void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel);
    // Test driver for tests generated from ml/nn/runtime/test/specs
    void execute(const TestModel& testModel);

    // VNDK version of the device under test.
    static int mVndkVersion;

    std::string mCacheDir;
    std::vector<uint8_t> mToken;
    bool mTestCompilationCaching = false;
    bool mTestDynamicOutputShape = false;
    bool mExpectFailure = false;
    bool mTestQuantizationCoupling = false;
    bool mTestDeviceMemory = false;
    Execution::ComputeMode mComputeMode = Execution::getComputeMode();

    std::unique_ptr<const NnApiSupportLibrary> mNnApi;
};

int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__;

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {
  protected:
    DynamicOutputShapeTest() { mTestDynamicOutputShape = true; }
};

// Tag for the fenced execute tests
class FencedComputeTest : public GeneratedTests {};

// Tag for the generated validation tests
class GeneratedValidationTests : public GeneratedTests {
  protected:
    GeneratedValidationTests() { mExpectFailure = true; }
};

class QuantizationCouplingTest : public GeneratedTests {
  protected:
    QuantizationCouplingTest() { mTestQuantizationCoupling = true; }
};

class DeviceMemoryTest : public GeneratedTests {
  protected:
    DeviceMemoryTest() { mTestDeviceMemory = true; }
};

bool GeneratedTests::checkSupported(const Model& model, ANeuralNetworksDevice* device) {
    constexpr static int MAX_NUM_OPS = 256;
    std::array<bool, MAX_NUM_OPS> supportedOps;
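    // Pre-fill the array with true: the NNAPI call below only writes one entry
    // per operation actually in the model, so the untouched tail of this
    // fixed-size array must not trip the all_of check that follows.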
    for (int i = 0; i < MAX_NUM_OPS; ++i) {
        supportedOps[i] = true;
    }
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksModel_getSupportedOperationsForDevices(
                      model.getHandle(), &device, /*numDevices=*/1, supportedOps.data()),
              ANEURALNETWORKS_NO_ERROR);
    const bool fullySupportedModel =
            std::all_of(supportedOps.begin(), supportedOps.end(), [](bool v) { return v; });
    return fullySupportedModel;
}

static std::vector<base::unique_fd> createCacheFds(const std::vector<std::string>& files) {
    std::vector<base::unique_fd> fds;
    fds.reserve(files.size());
    for (const auto& file : files) {
        auto fd = base::unique_fd(open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR));
        if (fd.get() == -1) {
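            // FAIL() may only appear in a function returning void, so invoke it
            // from an immediately-called lambda before bailing out.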
            [] { FAIL(); }();
            return {};
        }
        fds.push_back(std::move(fd));
    }
    return fds;
}

std::optional<Compilation> GeneratedTests::compileModel(const Model& model,
                                                        ANeuralNetworksDevice* device) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");

    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching will be
        // exercised if supported by the driver.
        // No invalid model will be passed to this branch.
        EXPECT_FALSE(mExpectFailure);

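        // The caching mode can be pinned when debugging via the system property
        // read below, e.g. `adb shell setprop debug.nn.slts.caching fds`.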
        std::string mode = ::android::base::GetProperty("debug.nn.slts.caching", "random");
        bool useSetCachingFromFds;
        if (mode == "path") {
            useSetCachingFromFds = false;
        } else if (mode == "fds") {
            useSetCachingFromFds = true;
        } else if (mode == "random") {
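            // Seed the RNG from the test name so that the "random" choice is
            // deterministic for any given test across reruns.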
            std::string testName = ::testing::UnitTest::GetInstance()->current_test_info()->name();
            std::seed_seq seq(testName.begin(), testName.end());
            std::mt19937 gen(seq);
            std::bernoulli_distribution d(0.5);
            useSetCachingFromFds = d(gen);
        } else {
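            // As above, FAIL() has to run inside a void-returning callable.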
            [&mode] {
                FAIL() << "System property debug.nn.slts.caching should be one of \"path\", "
                          "\"fds\", or \"random\"; got \""
                       << mode << "\"";
            }();
            return {};
        }
        SCOPED_TRACE("Use setCachingFromFds = " + std::to_string(useSetCachingFromFds) + " (" +
                     mode + ")");
        std::cout << "\nUse setCachingFromFds = " << std::boolalpha << useSetCachingFromFds << " ("
                  << mode << ")" << std::endl;

        std::vector<std::string> modelCacheFilenames, dataCacheFilenames;
        if (useSetCachingFromFds) {
            uint32_t numModelCacheFiles, numDataCacheFiles;
            EXPECT_EQ(mNnApi->getFL5()->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(
                              device, &numModelCacheFiles, &numDataCacheFiles),
                      ANEURALNETWORKS_NO_ERROR);
            for (uint32_t i = 0; i < numModelCacheFiles; i++) {
                modelCacheFilenames.push_back({mCacheDir + "/model" + std::to_string(i)});
            }
            for (uint32_t i = 0; i < numDataCacheFiles; i++) {
                dataCacheFilenames.push_back({mCacheDir + "/data" + std::to_string(i)});
            }
        }

        auto resultCompilation1 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation1.first, Result::NO_ERROR);
        auto compilation1 = std::move(resultCompilation1.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation1.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);

        auto resultCompilation2 = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation2.first, Result::NO_ERROR);
        auto compilation2 = std::move(resultCompilation2.second);
        if (useSetCachingFromFds) {
            auto modelCacheFds = createCacheFds(modelCacheFilenames);
            auto dataCacheFds = createCacheFds(dataCacheFilenames);
            EXPECT_EQ(compilation2.setCachingFromFds(modelCacheFds, dataCacheFds, mToken),
                      Result::NO_ERROR);
        } else {
            EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        }
        EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);

        return compilation2;
    } else {
        auto resultCompilation = Compilation::createForDevice(mNnApi.get(), &model, device);
        EXPECT_EQ(resultCompilation.first, Result::NO_ERROR);
        auto compilation = std::move(resultCompilation.second);
        Result result = compilation.finish();

        // For a valid model, we expect the compilation result to be NO_ERROR.
        // For an invalid model, the driver may fail at compilation or at execution,
        // so any result code is permitted at this point.
        if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
        EXPECT_EQ(result, Result::NO_ERROR);
        return compilation;
    }
}

void computeWithPtrs(const TestModel& testModel, Execution* execution,
                     Execution::ComputeMode computeMode, Result* result,
                     std::vector<TestBuffer>* outputs) {
    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithPtrs example");
        createRequest(testModel, execution, outputs);
    }
    *result = execution->compute(computeMode);
}

ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForInput(const Compilation& compilation,
                                                                  uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
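    // The 1.0f below is the role's usage-frequency hint, a value in (0.0, 1.0]
    // describing how likely the memory is to be used in this role (likewise for
    // the output role in createDeviceMemoryForOutput).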
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_addInputRole(
                      desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->getFL5()->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForOutput(const Compilation& compilation,
                                                                   uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_addOutputRole(
                      desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory);
    mNnApi->getFL5()->ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

// Set result = Result::NO_ERROR and outputs = {} if the test should be skipped.
void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation,
                                               const TestModel& testModel, Execution* execution,
                                               Execution::ComputeMode computeMode, Result* result,
                                               std::vector<TestBuffer>* outputs) {
    ASSERT_NE(execution, nullptr);
    ASSERT_NE(result, nullptr);
    ASSERT_NE(outputs, nullptr);
    outputs->clear();
    std::vector<Memory> inputMemories, outputMemories;

    {
        NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example");
        // Model inputs.
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            SCOPED_TRACE("Input index: " + std::to_string(i));
            const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
            // Omitted input.
            if (operand.data.size() == 0) {
                ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
                continue;
            }

            // Create device memory.
            ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
            ASSERT_NE(memory, nullptr);
            auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory));

            // Copy data from TestBuffer to device memory.
            auto ashmem = TestAshmem::createFrom(mNnApi.get(), operand.data);
            ASSERT_NE(ashmem, nullptr);
            ASSERT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
                      ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
        }

        // Model outputs.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
            ASSERT_NE(memory, nullptr);
            auto& wrapperMemory = outputMemories.emplace_back(Memory(mNnApi.get(), memory));
            ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
        }
    }

    *result = execution->compute(computeMode);

    // Copy out output results.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
        const size_t bufferSize = operand.data.size();
        auto& output = outputs->emplace_back(bufferSize);

        auto ashmem = TestAshmem::createFrom(mNnApi.get(), output);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(outputMemories[i].get(),
                                                               ashmem->get()->get()),
                  ANEURALNETWORKS_NO_ERROR);
        std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
                  output.getMutable<uint8_t>());
    }
}

void GeneratedTests::executeWithCompilation(const Compilation& compilation,
                                            const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");

    Execution execution(mNnApi.get(), &compilation);
    Result result;
    std::vector<TestBuffer> outputs;

    if (mTestDeviceMemory) {
        computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result,
                                  &outputs);
    } else {
        computeWithPtrs(testModel, &execution, mComputeMode, &result, &outputs);
    }

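    // computeWithDeviceMemories signals "skip this test" by reporting NO_ERROR
    // with no outputs (see the comment above that function).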
    if (result == Result::NO_ERROR && outputs.empty()) {
        return;
    }

    {
        NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
        if (mExpectFailure) {
            ASSERT_NE(result, Result::NO_ERROR);
            return;
        } else {
            ASSERT_EQ(result, Result::NO_ERROR);
        }

        // Check output dimensions.
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
            if (output.isIgnored) continue;
            std::vector<uint32_t> actualDimensions;
            ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions));
            ASSERT_EQ(output.dimensions, actualDimensions);
        }

        checkResults(testModel, outputs);
    }
}

void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    uint32_t numDevices = 0;
    mNnApi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->getFL5()->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->getFL5()->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        executeWithCompilation(compilation.value(), testModel);
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model,
                                                        const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::cout << "\nMultithreadedOwnCompilation" << std::endl;
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
                                                           const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    std::cout << "\nMultithreadedSharedCompilation" << std::endl;
    uint32_t numDevices = 0;
    mNnApi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices);
    bool modelSupported = false;
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        mNnApi->getFL5()->ANeuralNetworks_getDevice(i, &device);
        const char* deviceName = nullptr;
        mNnApi->getFL5()->ANeuralNetworksDevice_getName(device, &deviceName);
        SCOPED_TRACE("Device = " + std::string(deviceName));
        std::cout << "\nDevice = " << deviceName << std::endl;
        if (!checkSupported(model, device)) {
            std::cout << "\nModel not supported by device " << deviceName << ". Skipping"
                      << std::endl;
            continue;
        }
        modelSupported = true;
        std::cout << "\nModel supported" << std::endl;
        std::optional<Compilation> compilation = compileModel(model, device);
        // Early return if compilation fails. The compilation result code is
        // checked in compileModel.
        if (!compilation) return;
        std::vector<std::thread> threads;
        for (int i = 0; i < 10; i++) {
            threads.push_back(
                    std::thread([&]() { executeWithCompilation(compilation.value(), testModel); }));
        }
        std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
        std::cout << "\nExecution completed" << std::endl;
    }
    if (!modelSupported) {
        std::cout << "\nModel not supported by any device\n"
                  << "SKIPPED" << std::endl;
    }
}

// Test driver for tests generated from ml/nn/runtime/test/specs
void GeneratedTests::execute(const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    GeneratedModel model(mNnApi.get());
    createModel(mNnApi.get(), testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());
    auto executeInternal = [&testModel, &model, this]() {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
        std::cout << "\nCompilationCaching = " << std::boolalpha << mTestCompilationCaching
                  << std::endl;
#ifndef NNTEST_MULTITHREADED
        executeOnce(model, testModel);
#else   // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(model, testModel);
        executeMultithreadedSharedCompilation(model, testModel);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
    mTestCompilationCaching = false;
    executeInternal();
    if (!mExpectFailure) {
        mTestCompilationCaching = true;
        executeInternal();
    }
}

bool GeneratedTests::shouldSkipTest() {
    // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}.
    // The listed tests were added in a later release but exercise old APIs, so they should be
    // skipped if the device has a mixed build of system and vendor partitions.
    static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = {
            {
                    __ANDROID_API_R__,
                    {
                            "add_broadcast_quant8_all_inputs_as_internal",
                    },
            },
    };
    for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) {
        if (mVndkVersion < minVersion && names.count(kTestName) > 0) {
            return true;
        }
    }
    return false;
}

void GeneratedTests::SetUp() {
    // Note: Bionic's dirname() accepts a const char* and does not modify its argument.
    const char* libdir = dirname(SUPPORT_LIBRARY_NAME.c_str());
    setenv(kQCDspLoadPathEnv, libdir, 1);
    LOG(INFO) << "Overwrote system env variable " << kQCDspLoadPathEnv << " with " << libdir;
    mNnApi = loadNnApiSupportLibrary(SUPPORT_LIBRARY_NAME);

    GeneratedTestBase::SetUp();

    mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
    if (shouldSkipTest()) {
        GTEST_SKIP();
        return;
    }

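    // mkdtemp() replaces the trailing XXXXXX in place with a unique suffix and
    // returns the template buffer on success, or nullptr on failure.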
    char cacheDirTemp[] = NN_TMP_DIR "/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

void GeneratedTests::TearDown() {
    mNnApi.reset(nullptr);

    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Remove the cache directory specified by path recursively.
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
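        // FTW_DEPTH does a post-order walk (children before their directory),
        // so remove() sees each directory only after it has been emptied.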
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
    GeneratedTestBase::TearDown();
}

#ifdef NNTEST_COMPUTE_MODE
TEST_P(GeneratedTests, Sync) {
    std::cout << "\nComputeMode = SYNC" << std::endl;
    mComputeMode = Execution::ComputeMode::SYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Burst) {
    std::cout << "\nComputeMode = BURST" << std::endl;
    mComputeMode = Execution::ComputeMode::BURST;
    execute(testModel);
}
#else
TEST_P(GeneratedTests, Test) {
    execute(testModel);
}
#endif

TEST_P(DynamicOutputShapeTest, Test) {
    execute(testModel);
}

TEST_P(GeneratedValidationTests, Test) {
    execute(testModel);
}

TEST_P(QuantizationCouplingTest, Test) {
    execute(convertQuant8AsymmOperandsToSigned(testModel));
}

TEST_P(DeviceMemoryTest, Test) {
    execute(testModel);
}

TEST_P(FencedComputeTest, Test) {
    mComputeMode = Execution::ComputeMode::FENCED;
    execute(testModel);
}

INSTANTIATE_GENERATED_TEST(GeneratedTests,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) {
    return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest();
});

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.main.operations.size() == 1 &&
           testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands();
});

INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

}  // namespace android::nn::generated_tests