/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "AndroidVersionUtil.h"
#include "GeneratedTestUtils.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksTypes.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "TestUtils.h"
#include "TmpDirectoryUtils.h"

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
// the case for CTS (public APIs only).
#ifndef NNTEST_ONLY_PUBLIC_API
#include <Tracing.h>
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

#ifdef NNTEST_CTS
#define NNTEST_COMPUTE_MODE
#endif

namespace android::nn::generated_tests {
using namespace test_wrapper;
using namespace test_helper;

class GeneratedTests : public GeneratedTestBase {
   protected:
    void SetUp() override;
    void TearDown() override;

    bool shouldSkipTest();

    std::optional<Compilation> compileModel(const Model& model);
    void executeInternal(const Compilation& compilation, const TestModel& testModel,
                         bool testReusableExecution);
    void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
    void executeOnce(const Model& model, const TestModel& testModel);
    void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel);
    void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel);
    // Test driver for tests generated from ml/nn/runtime/test/spec
    void execute(const TestModel& testModel);

    // VNDK version of the device under test.
    static int mVndkVersion;

    std::string mCacheDir;
    std::vector<uint8_t> mToken;
    bool mTestCompilationCaching = false;
    bool mTestDynamicOutputShape = false;
    bool mExpectFailure = false;
    bool mTestQuantizationCoupling = false;
    bool mTestDeviceMemory = false;
    bool mTestReusableExecution = true;
    Execution::ComputeMode mComputeMode = Execution::getComputeMode();
};

int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__;

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {
   protected:
    DynamicOutputShapeTest() { mTestDynamicOutputShape = true; }
};

// Tag for the fenced execute tests
class FencedComputeTest : public GeneratedTests {};

// Tag for the generated validation tests
class GeneratedValidationTests : public GeneratedTests {
   protected:
    GeneratedValidationTests() { mExpectFailure = true; }
};

class QuantizationCouplingTest : public GeneratedTests {
   protected:
    QuantizationCouplingTest() {
        mTestQuantizationCoupling = true;
        // QuantizationCouplingTest is intended to verify that if a driver supports ASYMM quant8,
        // it must also support SYMM quant8. All models in QuantizationCouplingTest are also
        // executed in other test suites, so there is no need to test reusable execution again.
        mTestReusableExecution = false;
    }
};

class DeviceMemoryTest : public GeneratedTests {
   protected:
    DeviceMemoryTest() { mTestDeviceMemory = true; }
};

std::optional<Compilation> GeneratedTests::compileModel(const Model& model) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching will be
        // exercised if supported by the driver.
        // No invalid model will be passed to this branch.
        EXPECT_FALSE(mExpectFailure);
        Compilation compilation1(&model);
        EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);
        Compilation compilation2(&model);
        EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);
        return compilation2;
    } else {
        Compilation compilation(&model);
        Result result = compilation.finish();

        // For a valid model, we check that the compilation result is NO_ERROR.
        // For an invalid model, the driver may fail at compilation or execution, so any result
        // code is permitted at this point.
        if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
        EXPECT_EQ(result, Result::NO_ERROR);
        return compilation;
    }
}

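// Creates driver-managed (device) memory for the model input at the given index.
// The descriptor records the memory's role against this compilation; the 1.0f
// argument is the usage frequency hint. On failure the returned pointer remains
// nullptr, which callers assert against.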
static ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation,
                                                         uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

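// Same as createDeviceMemoryForInput, but registers an output role for the
// operand at the given index.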
static ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
                                                          uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

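// Binds the execution's inputs and outputs to freshly created device memories.
// Because device memory is not directly mappable, input data is staged through
// ashmem and transferred with ANeuralNetworksMemory_copy.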
static void createRequestWithDeviceMemories(const Compilation& compilation,
                                            const TestModel& testModel, Execution* execution,
                                            std::vector<Memory>* inputMemories,
                                            std::vector<Memory>* outputMemories) {
    ASSERT_NE(execution, nullptr);
    ASSERT_NE(inputMemories, nullptr);
    ASSERT_NE(outputMemories, nullptr);

    // Model inputs.
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        SCOPED_TRACE("Input index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
        // Omitted input.
        if (operand.data.size() == 0) {
            ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
            continue;
        }

        // Create device memory.
        ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
        ASSERT_NE(memory, nullptr);
        auto& wrapperMemory = inputMemories->emplace_back(memory);

        // Copy data from TestBuffer to device memory.
        auto ashmem = TestAshmem::createFrom(operand.data);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
    }

    // Model outputs.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
        ASSERT_NE(memory, nullptr);
        auto& wrapperMemory = outputMemories->emplace_back(memory);
        ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
    }
}

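// Reads back each output by copying its device memory into ashmem and then into
// a TestBuffer, so the results can be compared against the golden outputs.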
static void copyResultsFromDeviceMemories(const TestModel& testModel,
                                          const std::vector<Memory>& outputMemories,
                                          std::vector<TestBuffer>* outputs) {
    ASSERT_NE(outputs, nullptr);
    ASSERT_EQ(testModel.main.outputIndexes.size(), outputMemories.size());
    outputs->clear();

    // Copy out output results.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
        const size_t bufferSize = operand.data.size();
        auto& output = outputs->emplace_back(bufferSize);

        auto ashmem = TestAshmem::createFrom(output);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(ANeuralNetworksMemory_copy(outputMemories[i].get(), ashmem->get()->get()),
                  ANEURALNETWORKS_NO_ERROR);
        std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
                  output.getMutable<uint8_t>());
    }
}

void GeneratedTests::executeInternal(const Compilation& compilation, const TestModel& testModel,
                                     bool testReusableExecution) {
    NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeInternal example");

    Execution execution(&compilation);
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        execution.setReusable(testReusableExecution);
    }

    std::vector<TestBuffer> outputs;
    std::vector<Memory> inputMemories, outputMemories;

    if (mTestDeviceMemory) {
        createRequestWithDeviceMemories(compilation, testModel, &execution, &inputMemories,
                                        &outputMemories);
    } else {
        createRequest(testModel, &execution, &outputs);
    }

    const auto computeAndCheckResults = [this, &testModel, &execution, &outputs, &outputMemories] {
        Result result = execution.compute(mComputeMode);
        if (mTestDeviceMemory) {
            copyResultsFromDeviceMemories(testModel, outputMemories, &outputs);
        }

        if (result == Result::NO_ERROR && outputs.empty()) {
            return;
        }

        {
            NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeInternal example");
            if (mExpectFailure) {
                ASSERT_NE(result, Result::NO_ERROR);
                return;
            } else {
                ASSERT_EQ(result, Result::NO_ERROR);
            }

            // Check output dimensions.
            for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
                SCOPED_TRACE("Output index: " + std::to_string(i));
                const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
                if (output.isIgnored) continue;
                std::vector<uint32_t> actualDimensions;
                ASSERT_EQ(Result::NO_ERROR,
                          execution.getOutputOperandDimensions(i, &actualDimensions));
                ASSERT_EQ(output.dimensions, actualDimensions);
            }

            checkResults(testModel, outputs);
        }
    };

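    // Compute once unconditionally; for reusable executions, compute a second
    // time to check that reuse produces the same results.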
    computeAndCheckResults();
    if (testReusableExecution) {
        computeAndCheckResults();
    }
}

void GeneratedTests::executeWithCompilation(const Compilation& compilation,
                                            const TestModel& testModel) {
    // Single-time and reusable executions have different code paths, so test both.
    executeInternal(compilation, testModel, /*testReusableExecution=*/false);
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        if (mTestReusableExecution) {
            executeInternal(compilation, testModel, /*testReusableExecution=*/true);
        }
    }
}

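// x & (x - 1) clears the lowest set bit, so the expression is zero exactly when
// x has a single bit set.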
static bool isPowerOfTwo(uint32_t x) {
    return x > 0 && ((x & (x - 1)) == 0);
}

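// From feature level 5 on, a compilation reports its preferred memory alignment
// and padding for each input and output; the API documents both values as
// powers of two, which is what this check enforces.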
static void validateCompilationMemoryPreferences(const Compilation& compilation,
                                                 const TestModel& testModel) {
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            SCOPED_TRACE("Input index: " + std::to_string(i));
            uint32_t alignment = 0, padding = 0;
            ASSERT_EQ(compilation.getPreferredMemoryAlignmentForInput(i, &alignment),
                      Result::NO_ERROR);
            ASSERT_EQ(compilation.getPreferredMemoryPaddingForInput(i, &padding), Result::NO_ERROR);
            EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
            EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
        }
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            uint32_t alignment = 0, padding = 0;
            ASSERT_EQ(compilation.getPreferredMemoryAlignmentForOutput(i, &alignment),
                      Result::NO_ERROR);
            ASSERT_EQ(compilation.getPreferredMemoryPaddingForOutput(i, &padding),
                      Result::NO_ERROR);
            EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
            EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
        }
    }
}

void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    std::optional<Compilation> compilation = compileModel(model);
    // Early return if compilation fails. The compilation result code is checked in compileModel.
    if (!compilation) return;
    validateCompilationMemoryPreferences(compilation.value(), testModel);
    executeWithCompilation(compilation.value(), testModel);
}

void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model,
                                                        const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
                                                           const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    std::optional<Compilation> compilation = compileModel(model);
    // Early return if compilation fails. The compilation result code is checked in compileModel.
    if (!compilation) return;
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(
                std::thread([&]() { executeWithCompilation(compilation.value(), testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

// Test driver for tests generated from ml/nn/runtime/test/spec
void GeneratedTests::execute(const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    GeneratedModel model;
    createModel(testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());
    auto executeInternal = [&testModel, &model, this]() {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
#ifndef NNTEST_MULTITHREADED
        executeOnce(model, testModel);
#else   // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(model, testModel);
        executeMultithreadedSharedCompilation(model, testModel);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
    mTestCompilationCaching = false;
    executeInternal();
    if (!mExpectFailure) {
        mTestCompilationCaching = true;
        executeInternal();
    }
}

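// ANeuralNetworks_getRuntimeFeatureLevel is only available from feature level 5;
// on older releases, fall back to the device API level, which the early feature
// level constants track.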
static int64_t getRuntimeFeatureLevel() {
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        return ANeuralNetworks_getRuntimeFeatureLevel();
    }
#if defined(__BIONIC__)
    return android_get_device_api_level();
#else
    return __ANDROID_API__;
#endif  // __BIONIC__
}

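// Maps the minimum HAL version required by a test model to the corresponding
// NNAPI feature level, or std::nullopt when the version is unknown (treated as
// no constraint by the caller).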
static std::optional<int64_t> halVersionToFeatureLevel(TestHalVersion halVersion) {
    switch (halVersion) {
        case TestHalVersion::UNKNOWN:
            return std::nullopt;
        case TestHalVersion::V1_0:
            return ANEURALNETWORKS_FEATURE_LEVEL_1;
        case TestHalVersion::V1_1:
            return ANEURALNETWORKS_FEATURE_LEVEL_2;
        case TestHalVersion::V1_2:
            return ANEURALNETWORKS_FEATURE_LEVEL_3;
        case TestHalVersion::V1_3:
            return ANEURALNETWORKS_FEATURE_LEVEL_4;
        case TestHalVersion::AIDL_V1:
            return ANEURALNETWORKS_FEATURE_LEVEL_5;
        case TestHalVersion::AIDL_V2:
            return ANEURALNETWORKS_FEATURE_LEVEL_6;
        case TestHalVersion::AIDL_V3:
            return ANEURALNETWORKS_FEATURE_LEVEL_7;
    }
    LOG(FATAL) << "Unrecognized TestHalVersion "
               << static_cast<std::underlying_type_t<TestHalVersion>>(halVersion);
    return std::nullopt;
}

bool GeneratedTests::shouldSkipTest() {
    // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}.
    // The listed tests were added in a later release but exercise old APIs; they should be
    // skipped if the device has a mixed build of system and vendor partitions.
    static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = {
            {
                    __ANDROID_API_R__,
                    {
                            "add_broadcast_quant8_all_inputs_as_internal",
                    },
            },
    };
    for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) {
        if (mVndkVersion < minVersion && names.count(kTestName) > 0) {
            return true;
        }
    }

    // Skip test cases that are newer than what is allowed by
    // ANeuralNetworks_getRuntimeFeatureLevel.
    if (const auto featureLevelNeeded = halVersionToFeatureLevel(testModel.minSupportedVersion)) {
        return featureLevelNeeded.value() > getRuntimeFeatureLevel();
    }

    return false;
}

void GeneratedTests::SetUp() {
    GeneratedTestBase::SetUp();

    mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
    if (shouldSkipTest()) {
        GTEST_SKIP();
        return;
    }

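    // mkdtemp replaces the trailing XXXXXX with a unique suffix and creates the
    // directory, so each test run gets its own compilation cache directory.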
    char cacheDirTemp[] = NN_TMP_DIR "/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

void GeneratedTests::TearDown() {
    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Remove the cache directory specified by path recursively.
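        // nftw with FTW_DEPTH visits children before their parent directory, so a
        // plain remove() works for files and directories alike; FTW_PHYS avoids
        // following symlinks.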
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
    GeneratedTestBase::TearDown();
}

#ifdef NNTEST_COMPUTE_MODE
TEST_P(GeneratedTests, Sync) {
    mComputeMode = Execution::ComputeMode::SYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Async) {
    mComputeMode = Execution::ComputeMode::ASYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Burst) {
    mComputeMode = Execution::ComputeMode::BURST;
    execute(testModel);
}
#else
TEST_P(GeneratedTests, Test) {
    execute(testModel);
}
#endif

TEST_P(DynamicOutputShapeTest, Test) {
    execute(testModel);
}

TEST_P(GeneratedValidationTests, Test) {
    execute(testModel);
}

TEST_P(QuantizationCouplingTest, Test) {
    execute(convertQuant8AsymmOperandsToSigned(testModel));
}

TEST_P(DeviceMemoryTest, Test) {
    execute(testModel);
}

TEST_P(FencedComputeTest, Test) {
    mComputeMode = Execution::ComputeMode::FENCED;
    execute(testModel);
}

INSTANTIATE_GENERATED_TEST(GeneratedTests,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) {
    return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest();
});

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.main.operations.size() == 1 &&
           testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands();
});

INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

}  // namespace android::nn::generated_tests