/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/properties.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>

#include "GeneratedTestUtils.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"

#ifndef NNTEST_CTS
#include <HalInterfaces.h>
#include <SampleDriverFull.h>
#include <memunreachable/memunreachable.h>

#include <vector>

#include "HalUtils.h"
#include "Manager.h"

using android::nn::sample_driver::SampleDriverFull;

#endif

namespace android {
namespace nn {
namespace fuzzing_test {

using namespace test_helper;
using test_wrapper::Result;
constexpr char kRefDeviceName[] = "nnapi-reference";

#ifndef NNTEST_CTS
class TestDriverV1_2 : public SampleDriverFull {
   public:
    TestDriverV1_2() : SampleDriverFull(name, {.execTime = 0.9f, .powerUsage = 0.9f}) {}
    static constexpr char name[] = "TestDriverV1_2";
};

// Like SampleDriverFull, but implementing 1.1
class TestDriverV1_1 : public V1_1::IDevice {
   public:
    TestDriverV1_1()
        : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
    static constexpr char name[] = "TestDriverV1_1";
    hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations_1_1(
            const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
            const V1_1::Model& model, V1_1::ExecutionPreference preference,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
    }
    hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
    hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
                                                  getSupportedOperations_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel(
            const V1_0::Model& model,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel(model, actualCallback);
    }

   private:
    const sp<V1_2::IDevice> mDriverV1_2;
};

// Like SampleDriverFull, but implementing 1.0
class TestDriverV1_0 : public V1_0::IDevice {
   public:
    TestDriverV1_0()
        : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
    static constexpr char name[] = "TestDriverV1_0";
    hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
        return mDriverV1_2->getCapabilities(_hidl_cb);
    }
    hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
                                                  getSupportedOperations_cb _hidl_cb) override {
        return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
    }
    hardware::Return<V1_0::ErrorStatus> prepareModel(
            const V1_0::Model& model,
            const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
        return mDriverV1_2->prepareModel(model, actualCallback);
    }
    hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }

   private:
    const sp<V1_2::IDevice> mDriverV1_2;
};

#endif

// NN API fuzzer logging setting comes from the system properties debug.nn.fuzzer.log and
// debug.nn.fuzzer.dumpspec.
// * setprop debug.nn.fuzzer.log 1 : enable logging.
// * setprop debug.nn.fuzzer.log 0 : silence logging.
// * setprop debug.nn.fuzzer.dumpspec 1 : dump the randomly generated graph to a spec file.
// * setprop debug.nn.fuzzer.dumpspec 0 : do not dump the graph.
//
// Logs and spec files are dumped to /data/local/tmp/${testname}.{log,mod.py},
// e.g. for test case TestRandomGraph/RandomGraphTest/Large/0,
//     log : /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.log
//     spec: /data/local/tmp/TestRandomGraph_RandomGraphTest_Large_0.mod.py
//
class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
   public:
    static void SetUpTestCase() {
#ifndef NNTEST_CTS
        mEnableLog = ::android::base::GetProperty("debug.nn.fuzzer.log", "") == "1";
        mDumpSpec = ::android::base::GetProperty("debug.nn.fuzzer.dumpspec", "") == "1";
        mDetectMemoryLeak = ::android::base::GetProperty("debug.nn.fuzzer.detectleak", "") == "1";

        mStandardDevices = DeviceManager::get()->forTest_getDevices();
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_2::name, new TestDriverV1_2)));
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_1::name, new TestDriverV1_1)));
        mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice(
                makeSharedDevice(TestDriverV1_0::name, new TestDriverV1_0)));
#endif
        mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);

        // Get all the devices and device names.
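        // In addition to recording each device by name, track the minimum feature level across
        // all devices (mStandardDevicesFeatureLevel). It is used later both to decide whether a
        // generated graph must be skipped and whether the results of the "Compute normally" path
        // are checked for accuracy.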
        mStandardDevicesFeatureLevel = __ANDROID_API_FUTURE__;
        uint32_t numDevices = 0;
        ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
        for (uint32_t i = 0; i < numDevices; i++) {
            ANeuralNetworksDevice* device = nullptr;
            const char* name = nullptr;
            int64_t featureLevel;
            ASSERT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
            ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                      ANEURALNETWORKS_NO_ERROR);
            mDevices.emplace(name, device);
            mStandardDevicesFeatureLevel = std::min(mStandardDevicesFeatureLevel, featureLevel);
        }
    }

   protected:
    virtual void SetUp() override {
        // Initialize logging.
        const ::testing::TestInfo* const testInfo =
                ::testing::UnitTest::GetInstance()->current_test_info();
        mTestName = mTestName + testInfo->test_case_name() + "_" + testInfo->name();
        std::replace(mTestName.begin(), mTestName.end(), '/', '_');
        if (mEnableLog) NN_FUZZER_LOG_INIT("/data/local/tmp/" + mTestName + ".log");
    }

    virtual void TearDown() override {
        NN_FUZZER_LOG_CLOSE;
        // Dump test results on failure for debugging.
        if (::testing::Test::HasFailure() || mDumpSpec) {
            dumpTestResults();
        }
#ifndef NNTEST_CTS
        if (mDetectMemoryLeak) {
            ASSERT_TRUE(NoLeaks());
        }
#endif
    }

    bool shouldSkipTest(int64_t featureLevel) {
        static const std::set<std::string> kDisabledTests = {
                // In this test, the RGG produces a non-sensible graph with an extremely large
                // output gain and a highly clamped output range.
                // TODO: Currently quantized buffer values are uniformly distributed within
                //       [0, 255]. We should investigate a better buffer value generation
                //       algorithm that represents real-world cases.
                "TestRandomGraph_SingleOperationTest_CONV_2D_V1_2_40",
                "TestRandomGraph_SingleOperationTest_DEPTHWISE_CONV_2D_V1_0_32",
        };
        if (kDisabledTests.find(mTestName) != kDisabledTests.end()) return true;
        for (const auto& op : mTestModel.main.operations) {
            // Skip if testing BATCH_TO_SPACE_ND with batch dimension == 1.
            if (op.type == TestOperationType::BATCH_TO_SPACE_ND &&
                mTestModel.main.operands[op.inputs[0]].dimensions[0] == 1 &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // L2_NORMALIZATION on an axis of all zeros is undefined before R.
            if (op.type == TestOperationType::L2_NORMALIZATION &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // Skip the following operations for 1.2 and earlier devices.
            if ((op.type == TestOperationType::ADD || op.type == TestOperationType::SUB ||
                 op.type == TestOperationType::MAXIMUM || op.type == TestOperationType::MINIMUM ||
                 op.type == TestOperationType::ROI_ALIGN) &&
                mTestModel.main.operands[op.inputs[0]].type ==
                        TestOperandType::TENSOR_QUANT8_ASYMM &&
                featureLevel <= __ANDROID_API_Q__) {
                return true;
            }
            // Skip the following operations when the VNDK version is earlier than R.
            if (mVndkVersion < __ANDROID_API_R__ &&
                op.type == TestOperationType::HEATMAP_MAX_KEYPOINT) {
                return true;
            }
        }
        return false;
    }

    // Compute the golden output results of the test model on nnapi-reference.
    // If possible, the golden results are computed from an equivalent float32 model to avoid
    // bias from the quantized CPU implementation.
    void computeGoldenResults() {
        SCOPED_TRACE("computeGoldenResults");

        // Convert the test model to an equivalent float32 model if possible.
        auto fpModel = convertToFloat32Model(mTestModel);
        const TestModel& goldenModel = fpModel.has_value() ? fpModel.value() : mTestModel;

        // Create model.
        generated_tests::GeneratedModel model;
        generated_tests::createModel(goldenModel, &model);
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);

        // Create compilation for nnapi-reference.
        ASSERT_TRUE(mDevices.find(kRefDeviceName) != mDevices.end());
        const auto refDevice = mDevices[kRefDeviceName];
        auto [result, compilation] = test_wrapper::Compilation::createForDevice(&model, refDevice);
        ASSERT_EQ(result, Result::NO_ERROR);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(goldenModel, &execution, &outputs);

        // Compute result.
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);

        if (fpModel.has_value()) {
            // Quantize the execution results as golden values.
            setExpectedOutputsFromFloat32Results(outputs, &mTestModel);
        } else {
            for (uint32_t i = 0; i < outputs.size(); i++) {
                auto outputIndex = mTestModel.main.outputIndexes[i];
                mTestModel.main.operands[outputIndex].data = outputs[i];
            }
        }
    }

    // Compile and execute the generated graph on a device selected by name.
    void computeAndVerifyResultsForDevice(const test_wrapper::Model* model, uint32_t numOps,
                                          const std::string& name) {
        SCOPED_TRACE("Device: " + name);
        std::cout << "[ ] - RUN: " << name << "\n";
        ASSERT_TRUE(mDevices.find(name) != mDevices.end());
        const auto device = mDevices[name];

        // Check if the device fully supports the graph.
        constexpr int kMaxNumberOperations = 1000;
        ASSERT_TRUE(numOps <= kMaxNumberOperations);
        bool supported[kMaxNumberOperations] = {false};
        ASSERT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(model->getHandle(), &device,
                                                                        1, supported),
                  ANEURALNETWORKS_NO_ERROR);
        if (!std::all_of(supported, supported + numOps, [](bool v) { return v; })) {
            std::cout << "[ ] SKIP: " << name << " does not support the graph.\n";
            return;
        }

        // Since this test was introduced in Android Q, we only check the accuracy of output
        // results if the device has feature level >= Q (API level 29). Pre-Q devices are allowed
        // to produce less accurate results, but they must not hang or crash.
        int64_t featureLevel;
        ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
                  ANEURALNETWORKS_NO_ERROR);
        if (shouldSkipTest(featureLevel)) return;

        // Create compilation for device.
        auto [result, compilation] = test_wrapper::Compilation::createForDevice(model, device);
        ASSERT_EQ(result, Result::NO_ERROR);
        Result compileReturn = compilation.finish();
        // Even if the model is fully supported, the compilation may still fail, e.g.
        // each operation is supported, but the model is too big (too many operations and/or
        // too-large constants) for the device.
        if (compileReturn == Result::OP_FAILED) {
            std::cout << "[ ] SKIP: " << name << " failed at compilation step.\n";
            return;
        }
        ASSERT_EQ(compileReturn, Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(mTestModel, &execution, &outputs);

        // Compute result.
        Result executeReturn = execution.compute();
        // Even if the model is fully supported and the compilation succeeds, the execution may
        // still fail, e.g. there may be operand shapes that are unknown until execution time, and
        // at execution time turn out to be too big.
        if (executeReturn == Result::OP_FAILED) {
            std::cout << "[ ] SKIP: " << name << " failed at execution step.\n";
            return;
        }
        ASSERT_EQ(executeReturn, Result::NO_ERROR);

        if (featureLevel >= __ANDROID_API_Q__) {
            checkResults(mTestModel, outputs, mCriteria);
            mResults.emplace_back(name, std::move(outputs));
        }
    }

    // Compile and execute the generated graph normally (i.e., allow the runtime to
    // distribute the work across devices).
    void computeAndVerifyResults(const std::string& name, const test_wrapper::Model* model,
                                 bool shouldCheckResults) {
        // Because we're not using the introspection/control API, the CpuDevice
        // is available as a fallback, and hence we assume that compilation and
        // execution will succeed.
        SCOPED_TRACE(name);
        std::cout << "[ ] - RUN: " << name << "\n";

        // Create compilation.
        test_wrapper::Compilation compilation(model);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        // Create request.
        test_wrapper::Execution execution(&compilation);
        std::vector<TestBuffer> outputs;
        generated_tests::createRequest(mTestModel, &execution, &outputs);

        // Compute and verify result.
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
        if (shouldCheckResults) {
            checkResults(mTestModel, outputs, mCriteria);
            mResults.emplace_back(name, std::move(outputs));
        }
    }

    // Main test entry point.
    void testRandomGraph(uint32_t numOperations, uint32_t dimensionRange) {
        // Generate a random graph.
        RandomGraph graph;
        ASSERT_TRUE(graph.generate(kSeed, numOperations, dimensionRange));

        // Create a model from the random graph.
        mTestModel = graph.createTestModel();

        generated_tests::GeneratedModel model;
        generated_tests::createModel(mTestModel, &model);
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);

        // Compute reference results.
        computeGoldenResults();

        // Compute on each available device.
        for (auto& pair : mDevices) {
            computeAndVerifyResultsForDevice(&model, numOperations, pair.first);
        }

        if (numOperations > 1) {
            if (!shouldSkipTest(mStandardDevicesFeatureLevel)) {
                // Compute normally (i.e., allow the runtime to distribute the work across
                // devices).
                computeAndVerifyResults("Compute normally", &model,
                                        mStandardDevicesFeatureLevel >= __ANDROID_API_Q__);
            }

#ifndef NNTEST_CTS
            {
                // Stress the partitioner by allowing the runtime to distribute the work across
                // the three synthetic devices.
                // The synthetic devices use the CpuExecutor for execution, so we always check
                // results, even though some are of feature level < __ANDROID_API_Q__: in this
                // case, we don't take feature level as an indication of reliability, as we do
                // with real devices.
                DeviceManager::get()->forTest_setDevices(mSyntheticDevices);
                computeAndVerifyResults("Compute across synthetic devices", &model, true);
                DeviceManager::get()->forTest_setDevices(mStandardDevices);
            }
#endif
        }
    }

    void dumpTestResults() {
        std::ofstream os("/data/local/tmp/" + mTestName + ".mod.py");
        ASSERT_TRUE(os.is_open());
        os << "# Generated from " << mTestName << ". Do not edit.\n\n";
        SpecDumper dumper(mTestModel, os);
        dumper.dumpTestModel();
        for (const auto& [name, results] : mResults) {
            dumper.dumpResults(name, results);
        }
    }

    enum GraphSize : uint32_t { SINGLE = 1, SMALL = 5, LARGE = 40 };
    enum DimensionRange : uint32_t { NARROW = 10, WIDE = 1000 };

    static bool mEnableLog;
    static bool mDumpSpec;
    static bool mDetectMemoryLeak;
    static std::map<std::string, ANeuralNetworksDevice*> mDevices;

    const uint32_t kSeed = GetParam();
    std::string mTestName;
    TestModel mTestModel;
    AccuracyCriteria mCriteria;

    // A vector of {name, output_results}.
    std::vector<std::pair<std::string, std::vector<TestBuffer>>> mResults;

    static int mVndkVersion;
    static int64_t mStandardDevicesFeatureLevel;  // minimum across all devices
#ifndef NNTEST_CTS
    static std::vector<std::shared_ptr<Device>> mStandardDevices;
    static std::vector<std::shared_ptr<Device>> mSyntheticDevices;
#endif
};

bool RandomGraphTest::mEnableLog = false;
bool RandomGraphTest::mDumpSpec = false;
bool RandomGraphTest::mDetectMemoryLeak = false;
std::map<std::string, ANeuralNetworksDevice*> RandomGraphTest::mDevices;

int RandomGraphTest::mVndkVersion = __ANDROID_API_FUTURE__;
int64_t RandomGraphTest::mStandardDevicesFeatureLevel;
#ifndef NNTEST_CTS
std::vector<std::shared_ptr<Device>> RandomGraphTest::mStandardDevices;
std::vector<std::shared_ptr<Device>> RandomGraphTest::mSyntheticDevices;
#endif

// Single-op graph with dimensions in range [1, 1000].
class SingleOperationTest : public RandomGraphTest {};
#define TEST_SINGLE_OPERATION(operation, halVersion, criteria)                   \
    TEST_P(SingleOperationTest, operation##_##halVersion) {                      \
        OperationFilter filter = {.opcodes = {TestOperationType::operation},     \
                                  .versions = {TestHalVersion::halVersion}};     \
        OperationManager::get()->applyFilter(filter);                            \
        mCriteria = (criteria);                                                  \
        testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE);                \
    }
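// For illustration, an invocation such as TEST_SINGLE_OPERATION(ADD, V1_0, kMediumCriteria)
// (one of the invocations that appears further below) expands to roughly the following
// parameterized test:
//
//     TEST_P(SingleOperationTest, ADD_V1_0) {
//         OperationFilter filter = {.opcodes = {TestOperationType::ADD},
//                                   .versions = {TestHalVersion::V1_0}};
//         OperationManager::get()->applyFilter(filter);
//         mCriteria = (kMediumCriteria);
//         testRandomGraph(GraphSize::SINGLE, DimensionRange::WIDE);
//     }
//
// That is, each TEST_SINGLE_OPERATION line defines one single-operation random graph test, which
// INSTANTIATE_TEST_SUITE_P at the end of this file runs with seeds 0 through 49.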
// TODO: Adjust the accuracy criteria based on testing.
// We define three sets of accuracy criteria for single-operation tests.

// This is for operations that only copy buffers around without any computation on buffer values.
// Most of these operations fall into categories of reshape or selection, e.g. RESHAPE, GATHER.
// Additionally, operations with only logical or comparison arithmetic also use these criteria,
// e.g. EQUAL, ARGMAX, TOPK_V2.
const AccuracyCriteria kStrictCriteria = {
        .float32 = {.bias = 1e-7f, .mse = 1e-10f, .atol = 1e-6f, .rtol = 1e-6f},
        .float16 = {.bias = 1e-4f, .mse = 1e-8f, .atol = 1e-3f, .rtol = 1e-3f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant8AsymmSigned = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant8Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant16Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
        .quant16Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
};

// This is for operations that only do a simple, single computation on buffer values, such as
// addition, multiplication, or requantization. Most of these operations fall into categories of
// broadcast or elementwise, e.g. ADD, FLOOR.
const AccuracyCriteria kMediumCriteria = {
        .float32 = {.bias = 1e-6f, .mse = 1e-8f, .atol = 1e-5f, .rtol = 1e-5f},
        .float16 = {.bias = 1e-3f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant8AsymmSigned = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant8Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant16Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
        .quant16Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
};

// This is for operations that involve sophisticated computations on buffer values, either a single
// but complex transformation, e.g. LOGISTIC, or multiple transformations with accumulated errors,
// e.g. L2_NORMALIZATION, REDUCE_*.
const AccuracyCriteria kRelaxedCriteria = {
        .float32 = {.bias = 3e-5f, .mse = 1e-6f, .atol = 1e-3f, .rtol = 1e-3f},
        .float16 = {.bias = 5e-3f, .mse = 1e-3f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
};
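// Informal note on the fields above (a summary of how checkResults() in TestHarness is expected
// to interpret AccuracyCriteria, not an authoritative description of that implementation):
// "atol"/"rtol" are per-element absolute/relative tolerances, while "bias" and "mse" bound error
// statistics aggregated over an entire output buffer (mean error and mean squared error). For the
// quantized types the tolerances are presumed to be in quantized steps, so e.g. ".atol = 1"
// allows an off-by-one quantized value.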
// This is for convolution operations with potentially large kernel size.
const AccuracyCriteria kConvCriteria = {
        .float32 = {.bias = 4e-4f, .mse = 1e-5f, .atol = 2e-2f, .rtol = 2e-2f},
        .float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
        .quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
};

/*-- NNAPI 1.0 Operations ---------------------------------------------------*/

// TODO: The following 1.0 operation signatures are currently not defined:
// - ANEURALNETWORKS_LSH_PROJECTION
// - ANEURALNETWORKS_LSTM
// - ANEURALNETWORKS_RNN
// - ANEURALNETWORKS_SVDF

TEST_SINGLE_OPERATION(ADD, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_0, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_0, kConvCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_0, kMediumCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(HASHTABLE_LOOKUP, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_0, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_0, kStrictCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_0, kMediumCriteria);

/*-- NNAPI 1.1 Operations ---------------------------------------------------*/

TEST_SINGLE_OPERATION(SUB, V1_1, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_1, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_1, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_1, kStrictCriteria);

/*-- NNAPI 1.0 and 1.1 Operations with Extended Behavior in 1.2 -------------*/

TEST_SINGLE_OPERATION(ADD, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MUL, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SUB, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DIV, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(FLOOR, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(TANH, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_2, kStrictCriteria);

/*-- NNAPI 1.2 Operations ---------------------------------------------------*/

// TODO: The following 1.2 operation signatures are currently not defined:
// - ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM
// - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM
// - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN
// - ANEURALNETWORKS_BOX_WITH_NMS_LIMIT
// - ANEURALNETWORKS_DETECTION_POSTPROCESSING
// - ANEURALNETWORKS_GENERATE_PROPOSALS
// - ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
// - ANEURALNETWORKS_RANDOM_MULTINOMIAL
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
// - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN

TEST_SINGLE_OPERATION(ABS, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXP, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(NEG, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(RSQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SQRT, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ARGMAX, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(ARGMIN, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_AND, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_NOT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(LOGICAL_OR, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(NOT_EQUAL, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(MAXIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(MINIMUM, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(POW, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PRELU, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(REDUCE_ALL, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_ANY, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MIN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_PROD, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_SUM, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(INSTANCE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOG_SOFTMAX, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_2, kConvCriteria);
TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(QUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(CAST, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TILE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(GATHER, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SELECT, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(TOPK_V2, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SLICE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(SPLIT, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2, kRelaxedCriteria);

/*-- NNAPI 1.0, 1.1, and 1.2 Operations with Extended Behavior in 1.3 -------*/

TEST_SINGLE_OPERATION(ADD, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(CONCATENATION, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LOGISTIC, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(MUL, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU1, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RELU6, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(RESHAPE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SOFTMAX, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TANH, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(DIV, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(MEAN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(PAD, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SUB, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(ABS, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(ARGMAX, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(ARGMIN, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(CAST, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GATHER, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(LESS, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(LESS_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(MAXIMUM, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(MINIMUM, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(NOT_EQUAL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(PAD_V2, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(PRELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(QUANTIZE, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(REDUCE_MAX, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(REDUCE_MIN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_ALIGN, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_3, kRelaxedCriteria);
TEST_SINGLE_OPERATION(SELECT, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SLICE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(SPLIT, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(TILE, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TOPK_V2, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_3, kConvCriteria);
TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_3, kRelaxedCriteria);

/*-- NNAPI 1.3 Operations ---------------------------------------------------*/

// TODO: The following 1.3 operation signatures are currently not defined:
// - ANEURALNETWORKS_QUANTIZED_LSTM
// - ANEURALNETWORKS_IF
// - ANEURALNETWORKS_WHILE

TEST_SINGLE_OPERATION(ELU, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(HARD_SWISH, V1_3, kMediumCriteria);
TEST_SINGLE_OPERATION(FILL, V1_3, kStrictCriteria);
TEST_SINGLE_OPERATION(RANK, V1_3, kStrictCriteria);

const AccuracyCriteria kSmallGraphCriteria = {
        .float32 = {.bias = 4e-4f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
        .float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
        .quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
};

const AccuracyCriteria kLargeGraphCriteria = {
        .float32 = {.bias = 1e-2f, .mse = 1e-4f, .atol = 1e-1f, .rtol = 1e-1f},
        .float16 = {.bias = 1e-1f, .mse = 5e-2f, .atol = 1.0f, .rtol = 1.0f},
        .int32 = {.atol = 1},
        .quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
        .quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
        .quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
};

// Due to the limitations of the random graph generator, graphs generated with mixed-type or
// mixed-rank operations are likely to result in a disconnected network. Thus, we filter the
// operation signatures by primary data type and rank first, then generate random graph tests for
// each combination.
//
// Two parameterized tests are created for each filter:
// * 5-op graph with dimensions in range [1, 1000].
// * 40-op graph with dimensions in range [1, 10].
//
#define TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(dataType, rank)                             \
    TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) {                             \
        OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                                         \
        mCriteria = kSmallGraphCriteria;                                                      \
        testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE);                              \
    }                                                                                         \
    TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) {                             \
        OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
        OperationManager::get()->applyFilter(filter);                                         \
        mCriteria = kLargeGraphCriteria;                                                      \
        testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW);                            \
    }

// A random graph test with TENSOR_QUANT8_ASYMM as the primary data type is currently not defined.
// A graph generated with TENSOR_QUANT8_ASYMM as the primary data type will likely result in a
// disconnected graph due to mismatches between quantization parameters.

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_FLOAT16, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_INT32, 1);

TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 4);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 3);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 2);
TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 1);

INSTANTIATE_TEST_SUITE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u));
INSTANTIATE_TEST_SUITE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u));

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android