/hardware/interfaces/neuralnetworks/1.2/vts/functional/ |
D | CompilationCachingTests.cpp |
      320  sp<IPreparedModel>* preparedModel = nullptr) {  in saveModelToCache() argument
      321  if (preparedModel != nullptr) *preparedModel = nullptr;  in saveModelToCache()
      335  if (preparedModel != nullptr) {  in saveModelToCache()
      336  *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())  in saveModelToCache()
      367  sp<IPreparedModel>* preparedModel, ErrorStatus* status) {  in prepareModelFromCache() argument
      375  *preparedModel = nullptr;  in prepareModelFromCache()
      383  *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())  in prepareModelFromCache()
      426  sp<IPreparedModel> preparedModel = nullptr;  in TEST_P() local
      438  preparedModel = nullptr;  in TEST_P()
      443  prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);  in TEST_P()
      [all …]
|
D | VtsHalNeuralnetworks.cpp |
      39   sp<IPreparedModel>* preparedModel) {  in createPreparedModel() argument
      40   ASSERT_NE(nullptr, preparedModel);  in createPreparedModel()
      41   *preparedModel = nullptr;  in createPreparedModel()
      65   *preparedModel = getPreparedModel_1_2(preparedModelCallback);  in createPreparedModel()
      75   ASSERT_EQ(nullptr, preparedModel->get());  in createPreparedModel()
      84   ASSERT_NE(nullptr, preparedModel->get());  in createPreparedModel()
      125  void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
      127  void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
      129  void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
      135  sp<IPreparedModel> preparedModel;  in validateEverything() local
      [all …]
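The createPreparedModel() fragments above follow the usual 1.2 prepare-then-wait flow. Below is a minimal sketch of that flow, assuming the VTS PreparedModelCallback helper (wait()/getStatus()/getPreparedModel()) and the standard prepareModel_1_2() argument list; neither is quoted from the listing, so treat both as assumptions.

```cpp
// Sketch only (not the VTS source): asynchronous compile, wait on the
// callback, then downcast the result as the listing's castFrom() lines do.
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <1.2/Callbacks.h>  // assumed path to the VTS/utils PreparedModelCallback
#include <gtest/gtest.h>

using namespace android::hardware::neuralnetworks;
using android::sp;
using V1_2::implementation::PreparedModelCallback;  // assumed helper class

void prepareModelSketch(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
                        sp<V1_2::IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);
    *preparedModel = nullptr;

    // Launch compilation; the driver reports completion via IPreparedModelCallback.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    android::hardware::hidl_vec<android::hardware::hidl_handle> emptyCache;
    android::hardware::hidl_array<uint8_t, 32> token;  // cache token, unused here
    const auto launchStatus = device->prepareModel_1_2(
            model, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, emptyCache, emptyCache,
            token, callback);
    ASSERT_TRUE(launchStatus.isOk());
    ASSERT_EQ(V1_0::ErrorStatus::NONE, static_cast<V1_0::ErrorStatus>(launchStatus));

    // Block until notify_1_2(), then downcast, mirroring the
    // castFrom()/getPreparedModel() calls visible in the listing.
    callback->wait();
    ASSERT_EQ(V1_0::ErrorStatus::NONE, callback->getStatus());
    *preparedModel =
            V1_2::IPreparedModel::castFrom(callback->getPreparedModel()).withDefault(nullptr);
    ASSERT_NE(nullptr, preparedModel->get());
}
```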
|
D | ValidateRequest.cpp |
      44   static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,  in validate() argument
      65   preparedModel->execute_1_2(request, measure, executionCallback);  in validate()
      82   Return<void> executeStatus = preparedModel->executeSynchronously(  in validate()
      99   android::nn::ExecutionBurstController::create(preparedModel,  in validate()
      133  static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeInputTest() argument
      136  validate(preparedModel, message, request,  in removeInputTest()
      143  static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeOutputTest() argument
      146  validate(preparedModel, message, request,  in removeOutputTest()
      153  void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in validateRequest() argument
      154  removeInputTest(preparedModel, request);  in validateRequest()
      [all …]
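validate() above runs a deliberately broken request through every 1.2 execution path and expects INVALID_ARGUMENT. A sketch of the asynchronous and synchronous checks follows, assuming the VTS ExecutionCallback helper; the burst path (ExecutionBurstController) is omitted.

```cpp
// Sketch of the negative-path check: a mutated request must be rejected with
// INVALID_ARGUMENT both at launch and in the execution callback, and likewise
// on the synchronous path. ExecutionCallback is an assumed VTS helper.
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <1.2/Callbacks.h>  // assumed path to the VTS/utils ExecutionCallback
#include <gtest/gtest.h>

using namespace android::hardware::neuralnetworks;
using android::sp;
using V1_2::implementation::ExecutionCallback;  // assumed helper class

void expectInvalidRequest(const sp<V1_2::IPreparedModel>& preparedModel,
                          const V1_0::Request& badRequest) {
    const auto measure = V1_2::MeasureTiming::NO;

    // Asynchronous path: execute_1_2 must reject the request at launch and
    // report INVALID_ARGUMENT again through the callback.
    sp<ExecutionCallback> callback = new ExecutionCallback();
    const auto launchStatus = preparedModel->execute_1_2(badRequest, measure, callback);
    ASSERT_TRUE(launchStatus.isOk());
    ASSERT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT,
              static_cast<V1_0::ErrorStatus>(launchStatus));
    callback->wait();
    ASSERT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT, callback->getStatus());

    // Synchronous path: the status handed to the result callback must match.
    const auto syncStatus = preparedModel->executeSynchronously(
            badRequest, measure,
            [](V1_0::ErrorStatus status,
               const android::hardware::hidl_vec<V1_2::OutputShape>& /*outputShapes*/,
               const V1_2::Timing& /*timing*/) {
                ASSERT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT, status);
            });
    ASSERT_TRUE(syncStatus.isOk());
}
```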
|
/hardware/interfaces/neuralnetworks/1.3/vts/functional/ |
D | CompilationCachingTests.cpp |
      323  sp<IPreparedModel>* preparedModel = nullptr) {  in saveModelToCache() argument
      324  if (preparedModel != nullptr) *preparedModel = nullptr;  in saveModelToCache()
      338  if (preparedModel != nullptr) {  in saveModelToCache()
      339  *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())  in saveModelToCache()
      370  sp<IPreparedModel>* preparedModel, ErrorStatus* status) {  in prepareModelFromCache() argument
      378  *preparedModel = nullptr;  in prepareModelFromCache()
      386  *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())  in prepareModelFromCache()
      429  sp<IPreparedModel> preparedModel = nullptr;  in TEST_P() local
      441  preparedModel = nullptr;  in TEST_P()
      446  prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);  in TEST_P()
      [all …]
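The saveModelToCache()/prepareModelFromCache() fragments come from the test fixture's cache helpers. A sketch of how a test strings them together, with the fixture context stripped; the helper call-site arguments that are not visible in the listing are assumptions.

```cpp
// Sketch of the compilation-cache round trip the TEST_P fragments drive.
// saveModelToCache()/prepareModelFromCache() are the fixture helpers shown in
// the listing; their first arguments and the cache handle types are assumed.
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <gtest/gtest.h>

using namespace android::hardware::neuralnetworks;

void cacheRoundTripSketch(const V1_3::Model& model,
                          const android::hardware::hidl_vec<android::hardware::hidl_handle>& modelCache,
                          const android::hardware::hidl_vec<android::hardware::hidl_handle>& dataCache) {
    // Compile once so the driver populates its cache files.
    saveModelToCache(model, modelCache, dataCache);

    // Re-create the prepared model from the cache alone.
    android::sp<V1_3::IPreparedModel> preparedModel = nullptr;
    V1_3::ErrorStatus status;
    prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);

    // Caching is optional: a driver may fail with no object, but a NONE status
    // must be accompanied by a usable prepared model.
    if (status != V1_3::ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel.get());
    } else {
        ASSERT_NE(nullptr, preparedModel.get());
    }
}
```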
|
D | MemoryDomainTests.cpp |
      254  sp<IPreparedModel> preparedModel;  in createConvPreparedModel() local
      255  createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);  in createConvPreparedModel()
      256  return preparedModel;  in createConvPreparedModel()
      262  sp<IPreparedModel> preparedModel;  in createAddPreparedModel() local
      263  createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);  in createAddPreparedModel()
      264  return preparedModel;  in createAddPreparedModel()
      367  auto preparedModel = createConvPreparedModel(kTestOperand);  in TEST_P() local
      368  if (preparedModel == nullptr) return;  in TEST_P()
      372  .preparedModels = {preparedModel},  in TEST_P()
      407  auto preparedModel = createConvPreparedModel(kTestOperand);  in TEST_P() local
      [all …]
|
D | VtsHalNeuralnetworks.cpp |
      40   sp<IPreparedModel>* preparedModel, bool reportSkipping) {  in createPreparedModel() argument
      41   ASSERT_NE(nullptr, preparedModel);  in createPreparedModel()
      42   *preparedModel = nullptr;  in createPreparedModel()
      66   *preparedModel = getPreparedModel_1_3(preparedModelCallback);  in createPreparedModel()
      76   ASSERT_EQ(nullptr, preparedModel->get());  in createPreparedModel()
      89   ASSERT_NE(nullptr, preparedModel->get());  in createPreparedModel()
      130  void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
      132  void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
      134  void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
      137  void validateExecuteFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {  in validateExecuteFenced() argument
      [all …]
|
D | ValidateRequest.cpp |
      48   static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,  in validate() argument
      69   preparedModel->execute_1_3(request, measure, {}, {}, executionCallback);  in validate()
      86   Return<void> executeStatus = preparedModel->executeSynchronously_1_3(  in validate()
      107  android::nn::ExecutionBurstController::create(preparedModel,  in validate()
      142  preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},  in validate()
      155  static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeInputTest() argument
      158  validate(preparedModel, message, request,  in removeInputTest()
      165  static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeOutputTest() argument
      168  validate(preparedModel, message, request,  in removeOutputTest()
      175  void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in validateRequest() argument
      [all …]
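Line 142 above shows the 1.3-only fenced path. A sketch of the corresponding negative check, assuming the standard executeFenced() result-callback parameters, which are not visible in the listing.

```cpp
// Sketch of the fenced-execution rejection check: an invalid request must be
// turned away in the executeFenced() result callback with no sync fence and
// no IFencedExecutionCallback. The callback parameter list is an assumption.
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <gtest/gtest.h>

using namespace android::hardware::neuralnetworks;
using android::sp;

void expectFencedRejection(const sp<V1_3::IPreparedModel>& preparedModel,
                           const V1_3::Request& badRequest) {
    const auto ret = preparedModel->executeFenced(
            badRequest, /*waitFor=*/{}, V1_2::MeasureTiming::NO,
            /*deadline=*/{}, /*loopTimeoutDuration=*/{}, /*timeoutDurationAfterFence=*/{},
            [](V1_3::ErrorStatus status, const android::hardware::hidl_handle& syncFence,
               const sp<V1_3::IFencedExecutionCallback>& callback) {
                // Rejection must be explicit and leave no dangling resources.
                ASSERT_EQ(V1_3::ErrorStatus::INVALID_ARGUMENT, status);
                ASSERT_EQ(nullptr, syncFence.getNativeHandle());
                ASSERT_EQ(nullptr, callback.get());
            });
    ASSERT_TRUE(ret.isOk());
}
```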
|
D | QualityOfServiceTests.cpp |
      59   std::function<MaybeResults(const sp<IPreparedModel>& preparedModel, const Request& request,
      116  const sp<IPreparedModel> preparedModel =  in runPrepareModelTest() local
      127  ASSERT_EQ(nullptr, preparedModel.get());  in runPrepareModelTest()
      152  ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);  in runPrepareModelTest()
      170  static MaybeResults executeAsynchronously(const sp<IPreparedModel>& preparedModel,  in executeAsynchronously() argument
      178  Return<ErrorStatus> ret = preparedModel->execute_1_3(request, measure, deadline, {}, callback);  in executeAsynchronously()
      193  static MaybeResults executeSynchronously(const sp<IPreparedModel>& preparedModel,  in executeSynchronously() argument
      208  preparedModel->executeSynchronously_1_3(request, measure, deadline, {}, cb);  in executeSynchronously()
      216  void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,  in runExecutionTest() argument
      223  const auto results = execute(preparedModel, request, deadline);  in runExecutionTest()
      [all …]
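The QoS fragments drive executions with an explicit deadline. A sketch of the synchronous variant around executeSynchronously_1_3(); the set of acceptable error codes is an assumption about what a deadline-aware driver may legitimately return.

```cpp
// Sketch of a deadline-aware synchronous execution: pass an OptionalTimePoint
// and accept either a completed run or a missed-deadline abort.
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
#include <gtest/gtest.h>

using namespace android::hardware::neuralnetworks;
using android::sp;

void runWithDeadline(const sp<V1_3::IPreparedModel>& preparedModel,
                     const V1_3::Request& request, const V1_3::OptionalTimePoint& deadline) {
    const auto measure = V1_2::MeasureTiming::NO;
    const auto ret = preparedModel->executeSynchronously_1_3(
            request, measure, deadline, /*loopTimeoutDuration=*/{},
            [](V1_3::ErrorStatus status,
               const android::hardware::hidl_vec<V1_2::OutputShape>& /*outputShapes*/,
               const V1_2::Timing& /*timing*/) {
                // The driver may finish in time or abort because the deadline
                // cannot be met; anything else is a test failure.
                EXPECT_TRUE(status == V1_3::ErrorStatus::NONE ||
                            status == V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
                            status == V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT);
            });
    ASSERT_TRUE(ret.isOk());
}
```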
|
/hardware/interfaces/neuralnetworks/aidl/vts/functional/ |
D | MemoryDomainTests.cpp |
      272  std::shared_ptr<IPreparedModel> preparedModel;  in createConvPreparedModel() local
      273  createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);  in createConvPreparedModel()
      274  return preparedModel;  in createConvPreparedModel()
      280  std::shared_ptr<IPreparedModel> preparedModel;  in createAddPreparedModel() local
      281  createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);  in createAddPreparedModel()
      282  return preparedModel;  in createAddPreparedModel()
      353  preparedModelParcels.push_back({.preparedModel = model});  in validateAllocate()
      391  auto preparedModel = createConvPreparedModel(kTestOperand);  in TEST_P() local
      392  if (preparedModel == nullptr) return;  in TEST_P()
      396  .preparedModels = {preparedModel},  in TEST_P()
      [all …]
|
D | VtsHalNeuralnetworks.cpp |
      51   std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,  in createPreparedModel() argument
      53   ASSERT_NE(nullptr, preparedModel);  in createPreparedModel()
      54   *preparedModel = nullptr;  in createPreparedModel()
      89   *preparedModel = preparedModelCallback->getPreparedModel();  in createPreparedModel()
      97   ASSERT_EQ(nullptr, preparedModel->get());  in createPreparedModel()
      110  ASSERT_NE(nullptr, preparedModel->get());  in createPreparedModel()
      162  void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
      164  void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request);
      166  void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
      174  std::shared_ptr<IPreparedModel> preparedModel;  in validateEverything() local
      [all …]
|
D | ValidateRequest.cpp |
      40   static void validateReusableExecution(const std::shared_ptr<IPreparedModel>& preparedModel,  in validateReusableExecution() argument
      47   const auto createStatus = preparedModel->createReusableExecution(  in validateReusableExecution()
      87   static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,  in validate() argument
      107  const auto executeStatus = preparedModel->executeSynchronously(  in validate()
      119  const auto executeStatus = preparedModel->executeFenced(request, {}, false, kNoDeadline,  in validate()
      134  auto ret = preparedModel->configureExecutionBurst(&burst);  in validate()
      151  ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());  in validate()
      157  validateReusableExecution(preparedModel, message, request, measure);  in validate()
      163  const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(  in validate()
      175  const auto executeStatus = preparedModel->executeFencedWithConfig(  in validate()
      [all …]
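In the AIDL tests the rejection arrives as a binder service-specific error rather than a HIDL callback status. A sketch of the synchronous check, assuming the NDK-backend error plumbing and a -1 sentinel for kNoDeadline and the loop timeout (both assumptions here).

```cpp
// Sketch of the AIDL negative check: a mutated request must come back as a
// service-specific ErrorStatus::INVALID_ARGUMENT from executeSynchronously().
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
#include <android/binder_status.h>
#include <gtest/gtest.h>
#include <memory>

namespace aidl_nn = aidl::android::hardware::neuralnetworks;

void expectInvalidRequestAidl(const std::shared_ptr<aidl_nn::IPreparedModel>& preparedModel,
                              const aidl_nn::Request& badRequest) {
    constexpr int64_t kNoDeadline = -1;  // assumed sentinel, mirroring the VTS constant
    constexpr int64_t kNoTimeout = -1;   // assumption: let the driver use its default

    aidl_nn::ExecutionResult executionResult;
    const auto executeStatus = preparedModel->executeSynchronously(
            badRequest, /*measureTiming=*/false, kNoDeadline, kNoTimeout, &executionResult);

    // The binder call itself completes; the rejection is carried as a
    // service-specific error code that maps back onto ErrorStatus.
    ASSERT_FALSE(executeStatus.isOk());
    ASSERT_EQ(EX_SERVICE_SPECIFIC, executeStatus.getExceptionCode());
    ASSERT_EQ(aidl_nn::ErrorStatus::INVALID_ARGUMENT,
              static_cast<aidl_nn::ErrorStatus>(executeStatus.getServiceSpecificError()));
}
```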
|
D | CompilationCachingTests.cpp |
      321  std::shared_ptr<IPreparedModel>* preparedModel = nullptr) {  in saveModelToCache() argument
      322  if (preparedModel != nullptr) *preparedModel = nullptr;  in saveModelToCache()
      336  if (preparedModel != nullptr) {  in saveModelToCache()
      337  *preparedModel = preparedModelCallback->getPreparedModel();  in saveModelToCache()
      373  std::shared_ptr<IPreparedModel>* preparedModel, ErrorStatus* status,  in prepareModelFromCache() argument
      403  *preparedModel = nullptr;  in prepareModelFromCache()
      411  *preparedModel = preparedModelCallback->getPreparedModel();  in prepareModelFromCache()
      422  std::shared_ptr<IPreparedModel> preparedModel;  in verifyModelPreparationBehaviour() local
      427  prepareModelFromCache(modelCache, dataCache, &preparedModel, &status,  in verifyModelPreparationBehaviour()
      432  ASSERT_EQ(preparedModel, nullptr);  in verifyModelPreparationBehaviour()
      [all …]
|
D | QualityOfServiceTests.cpp |
      55   std::function<MaybeResults(const std::shared_ptr<IPreparedModel>& preparedModel,
      107  const std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();  in runPrepareModelTest() local
      115  ASSERT_EQ(nullptr, preparedModel.get());  in runPrepareModelTest()
      140  ASSERT_EQ(prepareReturnStatus == ErrorStatus::NONE, preparedModel.get() != nullptr);  in runPrepareModelTest()
      158  static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>& preparedModel,  in executeSynchronously() argument
      165  const auto ret = preparedModel->executeSynchronously(request, measure, deadlineNs,  in executeSynchronously()
      184  static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,  in executeBurst() argument
      191  auto ret = preparedModel->configureExecutionBurst(&burst);  in executeBurst()
      222  void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,  in runExecutionTest() argument
      230  const auto results = execute(preparedModel, request, deadlineNs);  in runExecutionTest()
      [all …]
|
/hardware/interfaces/neuralnetworks/utils/common/src/ |
D | ResilientPreparedModel.cpp |
      44   auto preparedModel = resilientPreparedModel.getPreparedModel();  in protect() local
      45   auto result = fn(*preparedModel);  in protect()
      53   auto maybePreparedModel = resilientPreparedModel.recover(preparedModel.get());  in protect()
      61   preparedModel = std::move(maybePreparedModel).value();  in protect()
      63   return fn(*preparedModel);  in protect()
      74   auto preparedModel = NN_TRY(makePreparedModel());  in create() local
      75   CHECK(preparedModel != nullptr);  in create()
      77   PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));  in create()
      82   nn::SharedPreparedModel preparedModel)  in ResilientPreparedModel() argument
      83   : kMakePreparedModel(std::move(makePreparedModel)), mPreparedModel(std::move(preparedModel)) {  in ResilientPreparedModel()
      [all …]
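The protect() fragments implement a retry-on-dead-binder wrapper. A condensed sketch of that control flow, using the same names; the error-type details follow the canonical nn::GeneralResult conventions and go slightly beyond what the listing shows.

```cpp
// Sketch of the protect() pattern: run the call once, and only if the remote
// object died, recover a fresh prepared model and run the call once more.
#include <nnapi/IPreparedModel.h>
#include <nnapi/Types.h>
#include <nnapi/hal/ResilientPreparedModel.h>  // assumed header path

#include <utility>

namespace nn = android::nn;
using android::hardware::neuralnetworks::utils::ResilientPreparedModel;

template <typename FnType>
auto protectSketch(const ResilientPreparedModel& resilientPreparedModel, const FnType& fn) {
    auto preparedModel = resilientPreparedModel.getPreparedModel();
    auto result = fn(*preparedModel);

    // Only a transport death is worth retrying; every other error goes back to
    // the caller unchanged.
    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
        return result;
    }

    // Ask the factory for a replacement object; if recovery itself fails,
    // surface the original DEAD_OBJECT error.
    auto maybePreparedModel = resilientPreparedModel.recover(preparedModel.get());
    if (!maybePreparedModel.has_value()) {
        return result;
    }
    preparedModel = std::move(maybePreparedModel).value();
    return fn(*preparedModel);
}
```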
|
/hardware/interfaces/neuralnetworks/utils/common/test/ |
D | ResilientPreparedModelTest.cpp |
      95   const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();  in TEST()
      98   const auto result = preparedModel->getPreparedModel();  in TEST()
      106  const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();  in TEST()
      112  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      121  const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();  in TEST()
      127  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      136  const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();  in TEST()
      142  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      151  const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();  in TEST()
      162  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.2/utils/test/ |
D | PreparedModelTest.cpp |
      150  const auto preparedModel =  in TEST() local
      157  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      167  const auto preparedModel =  in TEST() local
      175  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      185  const auto preparedModel =  in TEST() local
      192  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      202  const auto preparedModel =  in TEST() local
      209  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      219  const auto preparedModel =  in TEST() local
      227  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.0/utils/test/ |
D | PreparedModelTest.cpp |
      118  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST() local
      124  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      134  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST() local
      141  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      151  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST() local
      158  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      168  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST() local
      174  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      184  const auto preparedModel = PreparedModel::create(mockPreparedModel).value();  in TEST() local
      190  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.1/vts/functional/ |
D | ValidateRequest.cpp |
      38   static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,  in validate() argument
      45   Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);  in validate()
      56   static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeInputTest() argument
      59   validate(preparedModel, message, request,  in removeInputTest()
      66   static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeOutputTest() argument
      69   validate(preparedModel, message, request,  in removeOutputTest()
      76   void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in validateRequest() argument
      77   removeInputTest(preparedModel, request);  in validateRequest()
      78   removeOutputTest(preparedModel, request);  in validateRequest()
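removeInputTest()/removeOutputTest() above mutate a known-good request and require the driver to reject it. A sketch of the input-removal case, with the hidl_vec erase written inline and the 1.0 ExecutionCallback helper assumed (the real tests use shared utilities for both).

```cpp
// Sketch of removeInputTest(): drop each input in turn and expect the 1.0/1.1
// asynchronous execute() path to reject the malformed request.
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <1.0/Callbacks.h>  // assumed path to the VTS/utils ExecutionCallback
#include <gtest/gtest.h>

#include <string>
#include <vector>

using namespace android::hardware::neuralnetworks;
using android::sp;
using V1_0::implementation::ExecutionCallback;  // assumed helper class

void removeInputTestSketch(const sp<V1_0::IPreparedModel>& preparedModel,
                           const V1_0::Request& request) {
    for (size_t input = 0; input < request.inputs.size(); ++input) {
        const std::string message = "removeInput: removed input " + std::to_string(input);

        // Copy the request and erase one RequestArgument from .inputs.
        V1_0::Request mutated = request;
        std::vector<V1_0::RequestArgument> inputs(mutated.inputs.begin(), mutated.inputs.end());
        inputs.erase(inputs.begin() + input);
        mutated.inputs = inputs;

        // Launch the malformed execution; it must fail with INVALID_ARGUMENT
        // both at launch and through the callback.
        sp<ExecutionCallback> callback = new ExecutionCallback();
        const auto launchStatus = preparedModel->execute(mutated, callback);
        ASSERT_TRUE(launchStatus.isOk());
        ASSERT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT,
                  static_cast<V1_0::ErrorStatus>(launchStatus)) << message;
        callback->wait();
        ASSERT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT, callback->getStatus()) << message;
    }
}
```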
|
D | VtsHalNeuralnetworks.cpp |
      37   sp<IPreparedModel>* preparedModel) {  in createPreparedModel() argument
      38   ASSERT_NE(nullptr, preparedModel);  in createPreparedModel()
      39   *preparedModel = nullptr;  in createPreparedModel()
      62   *preparedModel = preparedModelCallback->getPreparedModel();  in createPreparedModel()
      72   ASSERT_EQ(nullptr, preparedModel->get());  in createPreparedModel()
      81   ASSERT_NE(nullptr, preparedModel->get());  in createPreparedModel()
      122  void validateRequest(const sp<V1_0::IPreparedModel>& preparedModel, const V1_0::Request& request);
      128  sp<IPreparedModel> preparedModel;  in validateEverything() local
      129  createPreparedModel(device, model, &preparedModel);  in validateEverything()
      130  if (preparedModel == nullptr) return;  in validateEverything()
      [all …]
|
/hardware/interfaces/neuralnetworks/aidl/utils/test/ |
D | PreparedModelTest.cpp |
      104  const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();  in TEST_P() local
      116  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST_P()
      128  const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();  in TEST_P() local
      134  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST_P()
      146  const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();  in TEST_P() local
      152  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST_P()
      164  const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();  in TEST_P() local
      170  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST_P()
      182  const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();  in TEST_P() local
      193  const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});  in TEST_P()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.3/utils/test/ |
D | PreparedModelTest.cpp |
      178  const auto preparedModel =  in TEST() local
      185  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      195  const auto preparedModel =  in TEST() local
      203  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      213  const auto preparedModel =  in TEST() local
      220  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      230  const auto preparedModel =  in TEST() local
      237  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      247  const auto preparedModel =  in TEST() local
      255  const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});  in TEST()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.0/vts/functional/ |
D | ValidateRequest.cpp |
      34   static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,  in validate() argument
      41   Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);  in validate()
      72   static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeInputTest() argument
      75   validate(preparedModel, message, request,  in removeInputTest()
      82   static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in removeOutputTest() argument
      85   validate(preparedModel, message, request,  in removeOutputTest()
      92   void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {  in validateRequest() argument
      93   removeInputTest(preparedModel, request);  in validateRequest()
      94   removeOutputTest(preparedModel, request);  in validateRequest()
|
D | VtsHalNeuralnetworks.cpp |
      34   sp<IPreparedModel>* preparedModel) {  in createPreparedModel() argument
      35   ASSERT_NE(nullptr, preparedModel);  in createPreparedModel()
      36   *preparedModel = nullptr;  in createPreparedModel()
      59   *preparedModel = preparedModelCallback->getPreparedModel();  in createPreparedModel()
      69   ASSERT_EQ(nullptr, preparedModel->get());  in createPreparedModel()
      78   ASSERT_NE(nullptr, preparedModel->get());  in createPreparedModel()
      119  void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
      125  sp<IPreparedModel> preparedModel;  in validateEverything() local
      126  createPreparedModel(device, model, &preparedModel);  in validateEverything()
      127  if (preparedModel == nullptr) return;  in validateEverything()
      [all …]
|
/hardware/interfaces/neuralnetworks/1.3/utils/src/ |
D | Callbacks.cpp |
      51   V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {  in prepareModelCallback() argument
      53   V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {  in prepareModelCallback()
      58   V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {  in prepareModelCallback()
      61   return V1_0::utils::prepareModelCallback(status, preparedModel);  in prepareModelCallback()
      79   ErrorStatus status, const sp<IPreparedModel>& preparedModel) {  in prepareModelCallback() argument
      81   return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));  in prepareModelCallback()
      98   const sp<V1_0::IPreparedModel>& preparedModel) {  in notify() argument
      99   mData.put(prepareModelCallback(status, preparedModel));  in notify()
      104  const sp<V1_2::IPreparedModel>& preparedModel) {  in notify_1_2() argument
      105  mData.put(prepareModelCallback(status, preparedModel));  in notify_1_2()
      [all …]
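prepareModelCallback() above probes the returned binder for the newest interface it implements. A sketch of that castFrom() cascade; the real code additionally wraps the result in the matching utils::PreparedModel adapter, which is omitted here.

```cpp
// Sketch of the version cascade: try the newest IPreparedModel interface
// first and fall back one HAL version at a time. castFrom() does a remote
// interface check; withDefault(nullptr) maps a failed transport call to null.
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>

using namespace android::hardware::neuralnetworks;
using android::sp;

sp<V1_0::IPreparedModel> highestVersion(const sp<V1_0::IPreparedModel>& preparedModel) {
    // Newest first: a 1.3 driver object satisfies every older interface.
    auto model13 = V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr);
    if (model13 != nullptr) return model13;

    auto model12 = V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr);
    if (model12 != nullptr) return model12;

    // Oldest interface: hand the object through unchanged.
    return preparedModel;
}
```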
|
/hardware/interfaces/neuralnetworks/1.2/utils/src/ |
D | Callbacks.cpp |
      47   V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {  in prepareModelCallback() argument
      49   V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {  in prepareModelCallback()
      52   return V1_0::utils::prepareModelCallback(status, preparedModel);  in prepareModelCallback()
      64   V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {  in prepareModelCallback() argument
      66   return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));  in prepareModelCallback()
      82   const sp<V1_0::IPreparedModel>& preparedModel) {  in notify() argument
      83   mData.put(prepareModelCallback(status, preparedModel));  in notify()
      88   const sp<IPreparedModel>& preparedModel) {  in notify_1_2() argument
      89   mData.put(prepareModelCallback(status, preparedModel));  in notify_1_2()
|