
Searched refs:preparedModel (Results 1 – 25 of 35) sorted by relevance

/packages/modules/NeuralNetworks/runtime/test/
PreparedModelCallback.cpp
22 bool deadObject, ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) { in notifyInternal() argument
34 mPreparedModel = preparedModel; in notifyInternal()
43 V1_0::ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) { in notify() argument
44 return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); in notify()
48 V1_0::ErrorStatus errorStatus, const sp<V1_2::IPreparedModel>& preparedModel) { in notify_1_2() argument
49 return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); in notify_1_2()
53 V1_3::ErrorStatus errorStatus, const sp<V1_3::IPreparedModel>& preparedModel) { in notify_1_3() argument
54 return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); in notify_1_3()
PreparedModelCallback.h
91 const sp<V1_0::IPreparedModel>& preparedModel) override;
117 const sp<V1_2::IPreparedModel>& preparedModel) override;
145 const sp<V1_3::IPreparedModel>& preparedModel) override;
197 const sp<V1_0::IPreparedModel>& preparedModel);
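
The two files above implement the standard asynchronous-preparation handshake: each versioned notify*() converts its error code and funnels into a single notifyInternal(), and the caller can then block until a result arrives. A minimal self-contained sketch of that pattern follows (the types are illustrative stand-ins, not the real HAL classes, and the deadObject plumbing is omitted):

    #include <condition_variable>
    #include <memory>
    #include <mutex>

    // Illustrative stand-ins for the HAL types in the snippets above.
    enum class ErrorStatus { NONE, GENERAL_FAILURE };
    struct IPreparedModel {};

    class PreparedModelCallback {
       public:
        // Every versioned notify*() would convert its status and land here.
        void notifyInternal(ErrorStatus errorStatus,
                            std::shared_ptr<IPreparedModel> preparedModel) {
            {
                std::lock_guard<std::mutex> lock(mMutex);
                if (mNotified) return;  // only the first notification counts
                mNotified = true;
                mErrorStatus = errorStatus;
                mPreparedModel = std::move(preparedModel);
            }
            mCondition.notify_all();
        }

        // The runtime blocks here until the driver reports a result.
        std::shared_ptr<IPreparedModel> wait() {
            std::unique_lock<std::mutex> lock(mMutex);
            mCondition.wait(lock, [this] { return mNotified; });
            return mErrorStatus == ErrorStatus::NONE ? mPreparedModel : nullptr;
        }

       private:
        std::mutex mMutex;
        std::condition_variable mCondition;
        bool mNotified = false;
        ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
        std::shared_ptr<IPreparedModel> mPreparedModel;
    };
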
TestExecution.cpp
80 TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) in TestPreparedModelLatest() argument
81 : mPreparedModelV1_0(preparedModel), in TestPreparedModelLatest()
82 mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)), in TestPreparedModelLatest()
83 mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)), in TestPreparedModelLatest()
276 TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) in TestPreparedModel12() argument
277 : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {} in TestPreparedModel12()
312 TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) in TestPreparedModel10() argument
313 : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {} in TestPreparedModel10()
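
TestPreparedModelLatest's initializer list (lines 81–83) shows the HIDL version-probing idiom: castFrom() yields the narrower generated interface when the driver object actually supports it, and withDefault(nullptr) turns a failed cast into a plain null. A hedged usage sketch, assuming the generated V1_0/V1_2/V1_3 headers are on the include path (tryUpcast is an illustrative helper name):

    #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
    #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
    #include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>

    using android::sp;
    namespace V1_0 = android::hardware::neuralnetworks::V1_0;
    namespace V1_2 = android::hardware::neuralnetworks::V1_2;
    namespace V1_3 = android::hardware::neuralnetworks::V1_3;

    // Returns the most capable interface the driver object supports.
    // For an in-process object the cast is cheap; for a remote binder
    // castFrom() queries the far side, so a V1_0-only driver yields
    // nullptr here rather than a bogus downcast.
    sp<V1_3::IPreparedModel> tryUpcast(const sp<V1_0::IPreparedModel>& pm) {
        sp<V1_2::IPreparedModel> pm12 =
                V1_2::IPreparedModel::castFrom(pm).withDefault(nullptr);
        if (pm12 == nullptr) return nullptr;  // driver predates 1.2
        return V1_3::IPreparedModel::castFrom(pm12).withDefault(nullptr);
    }
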
/packages/modules/NeuralNetworks/driver/sample_hidl/
SampleDriverUtils.cpp
47 const sp<SamplePreparedModel>& preparedModel) { in notify() argument
48 const auto ret = callback->notify(convertToV1_0(status), preparedModel); in notify()
55 const sp<SamplePreparedModel>& preparedModel) { in notify() argument
56 const auto ret = callback->notify_1_2(convertToV1_0(status), preparedModel); in notify()
64 const sp<SamplePreparedModel>& preparedModel) { in notify() argument
65 const auto ret = callback->notify_1_3(status, preparedModel); in notify()
SampleDriverUtils.h
39 const sp<SamplePreparedModel>& preparedModel);
42 const sp<SamplePreparedModel>& preparedModel);
45 const sp<SamplePreparedModel>& preparedModel);
88 sp<SamplePreparedModel> preparedModel =
90 if (!preparedModel->initialize()) {
94 notify(callback, V1_3::ErrorStatus::NONE, preparedModel);
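
Lines 88–94 of SampleDriverUtils.h are the driver half of the handshake: build the prepared model, run its one-time initialize(), then report success or failure through the caller's callback. A minimal sketch of that flow with illustrative local types (the real code goes through the V1_3 HAL types and returns early on failure, as here):

    #include <memory>

    // Illustrative stand-ins; the real driver uses generated HAL types.
    enum class ErrorStatus { NONE, GENERAL_FAILURE };
    struct SamplePreparedModel {
        bool initialize() { return true; }  // e.g. set up pools and plans
    };
    struct Callback {
        void notify(ErrorStatus, std::shared_ptr<SamplePreparedModel>) {
            // Deliver the result to the client; body elided.
        }
    };

    void prepareModelBase(Callback& callback) {
        auto preparedModel = std::make_shared<SamplePreparedModel>();
        if (!preparedModel->initialize()) {
            // Report failure with a null model; the client must not use it.
            callback.notify(ErrorStatus::GENERAL_FAILURE, nullptr);
            return;
        }
        callback.notify(ErrorStatus::NONE, preparedModel);
    }
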
SampleDriver.cpp
237 const sp<V1_3::IPreparedModel>& preparedModel) { in castToSamplePreparedModel() argument
238 if (preparedModel->isRemote()) { in castToSamplePreparedModel()
243 return static_cast<const SamplePreparedModel*>(preparedModel.get()); in castToSamplePreparedModel()
257 auto getModel = [](const sp<V1_3::IPreparedModel>& preparedModel) -> const V1_3::Model* { in allocate() argument
258 const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel); in allocate()
372 const SamplePreparedModel* preparedModel) { in createRunTimePoolInfos() argument
396 i, uncheckedConvert(request), preparedModel)); in createRunTimePoolInfos()
451 const SamplePreparedModel* preparedModel, in asyncExecute() argument
459 createRunTimePoolInfos(request, driver, preparedModel); in asyncExecute()
507 const SamplePreparedModel* preparedModel, in executeBase() argument
[all …]
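
castToSamplePreparedModel() (lines 237–243) guards a downcast that is only meaningful in-process: a remote binder proxy carries none of the driver's private state, so the helper refuses it rather than static_cast a proxy. A self-contained sketch of the idea (the types are stand-ins):

    // Stand-in for a binder-like interface that knows whether it is a proxy.
    struct IPreparedModel {
        virtual ~IPreparedModel() = default;
        virtual bool isRemote() const = 0;
    };

    struct SamplePreparedModel : IPreparedModel {
        bool isRemote() const override { return false; }  // always local
        // ... driver-private state that the cast is meant to reach ...
    };

    const SamplePreparedModel* castToSamplePreparedModel(const IPreparedModel* pm) {
        if (pm == nullptr || pm->isRemote()) {
            // A remote proxy only forwards the HAL methods; none of the
            // driver-private state exists on this side, so refuse the cast.
            return nullptr;
        }
        return static_cast<const SamplePreparedModel*>(pm);
    }
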
/packages/modules/NeuralNetworks/shim_and_sl/
ShimDevice.cpp
89 const ShimPreparedModel* castToShimPreparedModel(IPreparedModel* preparedModel) { in castToShimPreparedModel() argument
90 if (preparedModel->isRemote()) { in castToShimPreparedModel()
95 return static_cast<const ShimPreparedModel*>(preparedModel); in castToShimPreparedModel()
343 constexpr auto getCompilation = [](IPreparedModel* preparedModel) -> const ShimPreparedModel* { in allocate() argument
344 const auto* samplePreparedModel = castToShimPreparedModel(preparedModel); in allocate()
361 auto preparedModel = preparedModels[role.modelIndex]; in allocate() local
362 if (preparedModel.preparedModel == nullptr) { in allocate()
367 auto pmodel = getCompilation(preparedModel.preparedModel.get()); in allocate()
399 auto preparedModel = preparedModels[role.modelIndex]; in allocate() local
400 if (preparedModel.preparedModel == nullptr) { in allocate()
[all …]
/packages/modules/NeuralNetworks/driver/sample_aidl/
SampleDriverAidlUtils.cpp
60 const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) { in notify() argument
61 const auto ret = callback->notify(status, preparedModel); in notify()
140 std::shared_ptr<SamplePreparedModel> preparedModel = in prepareModelBase() local
143 if (!preparedModel->initialize()) { in prepareModelBase()
147 notify(callback, aidl_hal::ErrorStatus::NONE, preparedModel); in prepareModelBase()
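
The AIDL utils repeat the notify pattern with std::shared_ptr instead of sp<>, and the returned status still has to be checked because the client may already be gone. A minimal sketch with stand-in types shaped roughly like the NDK AIDL ones:

    #include <memory>

    // Stand-ins: notify() returns a status object that must be checked,
    // because the client process may already have died.
    struct Status {
        bool isOk() const { return ok; }
        bool ok = true;
    };
    enum class ErrorStatus { NONE, GENERAL_FAILURE };
    struct IPreparedModel {};
    struct IPreparedModelCallback {
        Status notify(ErrorStatus, const std::shared_ptr<IPreparedModel>&) {
            return {};
        }
    };

    void notify(const std::shared_ptr<IPreparedModelCallback>& callback,
                ErrorStatus status,
                const std::shared_ptr<IPreparedModel>& preparedModel) {
        const auto ret = callback->notify(status, preparedModel);
        if (!ret.isOk()) {
            // Nothing to recover here: the one-way notification failed,
            // typically because the client went away. The real code logs.
        }
    }
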
SampleDriverAidl.cpp
148 const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) { in castToSamplePreparedModel() argument
149 if (preparedModel->isRemote()) { in castToSamplePreparedModel()
154 return static_cast<const SamplePreparedModel*>(preparedModel.get()); in castToSamplePreparedModel()
164 constexpr auto getModel = [](const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) in allocate()
166 const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel); in allocate()
177 preparedModels.push_back(halPreparedModelParcel.preparedModel); in allocate()
317 const SamplePreparedModel* preparedModel) { in createRunTimePoolInfos() argument
339 bufferWrapper->validateRequest(i, request, preparedModel)) in createRunTimePoolInfos()
625 SampleBurst::SampleBurst(std::shared_ptr<SamplePreparedModel> preparedModel) in SampleBurst() argument
626 : kPreparedModel(std::move(preparedModel)) { in SampleBurst()
SampleDriverAidl.h
173 explicit SampleBurst(std::shared_ptr<SamplePreparedModel> preparedModel);
193 SampleExecution(std::shared_ptr<SamplePreparedModel> preparedModel, aidl_hal::Request request, in SampleExecution() argument
195 : kPreparedModel(std::move(preparedModel)), in SampleExecution()
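
SampleBurst and SampleExecution above both take the shared_ptr by value and move it into a const member, pinning the prepared model alive for the lifetime of the burst or execution object. A short sketch of that ownership idiom:

    #include <memory>
    #include <utility>

    struct SamplePreparedModel {};  // illustrative stand-in

    class SampleBurst {
       public:
        // Pass by value + move: one reference-count bump at the call
        // site, then a cheap transfer into the member.
        explicit SampleBurst(std::shared_ptr<SamplePreparedModel> preparedModel)
            : kPreparedModel(std::move(preparedModel)) {}

       private:
        // const member: the burst is tied to exactly one prepared model
        // and keeps it alive for the burst's whole lifetime.
        const std::shared_ptr<SamplePreparedModel> kPreparedModel;
    };
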
SampleDriverAidlUtils.h
39 const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel);
/packages/modules/NeuralNetworks/runtime/test/android_fuzzing/
DriverFuzzTest.cpp
58 const auto preparedModel = NN_TRY(device->prepareModel( in runTest() local
62 CHECK(preparedModel != nullptr); in runTest()
68 NN_TRY(preparedModel->execute(request, MeasureTiming::YES, /*deadline=*/{}, in runTest()
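
The fuzz harness drives prepare-then-execute through the canonical types, with NN_TRY unwrapping each Result or returning early from runTest(). A rough approximation of such a macro over std::optional (a sketch only; it assumes the GCC/Clang statement-expression extension, which the real macro relies on as well):

    #include <optional>
    #include <utility>

    // Illustrative early-return macro: either yields the unwrapped value
    // or bails out of the enclosing function, like NN_TRY over nn::Result.
    #define TRY(expr)                                \
        ({                                           \
            auto _result = (expr);                   \
            if (!_result.has_value()) return false;  \
            std::move(_result).value();              \
        })

    struct PreparedModel {};  // stand-in

    std::optional<PreparedModel> prepareModel() { return PreparedModel{}; }
    std::optional<int> execute(const PreparedModel&) { return 0; }

    bool runTest() {
        // Each TRY either produces the value or returns early.
        const PreparedModel preparedModel = TRY(prepareModel());
        const int status = TRY(execute(preparedModel));
        return status == 0;
    }
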
/packages/modules/NeuralNetworks/runtime/
Memory.cpp
330 auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) { in addRole() argument
331 roles.emplace_back(preparedModel, type, index); in addRole()
384 for (const auto& [preparedModel, type, ind] : roles) { in addRole()
385 uint32_t modelIndex = mDesc.preparedModels.add(preparedModel); in addRole()
423 for (const auto* preparedModel : desc.preparedModels) { in logMemoryDescriptorToInfo() local
424 LOG(INFO) << " service = " << preparedModel->getDevice()->getName(); in logMemoryDescriptorToInfo()
439 for (const auto* preparedModel : desc.preparedModels) { in getDevices() local
440 const auto* device = preparedModel->getDevice(); in getDevices()
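
Memory.cpp's addRole() collects (preparedModel, IOType, index) triples through a lambda callback and then deduplicates the prepared models into an indexed list on the descriptor. A runnable sketch of that collection step with stand-in types (the real mDesc.preparedModels.add() plays the deduplicating role):

    #include <cstdint>
    #include <tuple>
    #include <vector>

    // Illustrative stand-ins for the runtime types in the snippet above.
    struct PreparedModel {};
    enum class IOType { INPUT, OUTPUT };
    using Role = std::tuple<const PreparedModel*, IOType, uint32_t>;

    // The descriptor stores each prepared model exactly once.
    std::vector<const PreparedModel*> collectModels(const std::vector<Role>& roles) {
        std::vector<const PreparedModel*> models;
        for (const auto& role : roles) {
            const PreparedModel* preparedModel = std::get<0>(role);
            bool seen = false;
            for (const auto* m : models) seen = seen || (m == preparedModel);
            if (!seen) models.push_back(preparedModel);
        }
        return models;
    }

    int main() {
        PreparedModel pm;
        std::vector<Role> roles;
        // Validation hands each (model, type, index) role to a callback.
        auto callback = [&roles](const PreparedModel* preparedModel, IOType type,
                                 uint32_t index) {
            roles.emplace_back(preparedModel, type, index);
        };
        callback(&pm, IOType::INPUT, 0);
        callback(&pm, IOType::OUTPUT, 0);
        return collectModels(roles).size() == 1 ? 0 : 1;  // same model, one entry
    }
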
Manager.cpp
182 DriverPreparedModel(const Device* device, const SharedPreparedModel& preparedModel) in DriverPreparedModel() argument
183 : mDevice(device), mPreparedModel(preparedModel) { in DriverPreparedModel()
528 SharedPreparedModel preparedModel = std::move(result).value(); in prepareModel() local
529 CHECK(preparedModel != nullptr) in prepareModel()
532 std::make_shared<DriverPreparedModel>(this, std::move(preparedModel))}; in prepareModel()
540 [](const auto* preparedModel) { in allocate() argument
541 const auto versionedPreparedModel = preparedModel->getInterface(); in allocate()
977 CpuExecution(const CpuPreparedModel& preparedModel, Request request, in CpuExecution() argument
980 : kPreparedModel(preparedModel), in CpuExecution()
1069 std::shared_ptr<RuntimePreparedModel> preparedModel = in create() local
[all …]
/packages/modules/NeuralNetworks/driver/sample/
CanonicalBurst.cpp
33 Burst::Burst(std::shared_ptr<const PreparedModel> preparedModel) in Burst() argument
34 : kPreparedModel(std::move(preparedModel)) { in Burst()
CanonicalDevice.cpp
213 auto getModel = [](const SharedPreparedModel& preparedModel) -> const Model* { in allocate() argument
214 std::any resource = preparedModel->getUnderlyingResource(); in allocate()
CanonicalBurst.h
37 explicit Burst(std::shared_ptr<const PreparedModel> preparedModel);
CanonicalPreparedModel.cpp
40 const PreparedModel& preparedModel) { in createRunTimePoolInfos() argument
61 bufferWrapper->validateRequest(i, request, &preparedModel); in createRunTimePoolInfos()
/packages/modules/NeuralNetworks/common/include/
DefaultExecution.h
34 DefaultExecution(SharedPreparedModel preparedModel, Request request, MeasureTiming measure, in DefaultExecution() argument
36 : kPreparedModel(std::move(preparedModel)), in DefaultExecution()
HalBufferTracker.h
54 const V1_3::IPreparedModel* preparedModel) const;
/packages/modules/NeuralNetworks/common/
HalBufferTracker.cpp
61 const V1_3::IPreparedModel* preparedModel) const { in validateRequest()
71 if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) { in validateRequest()
99 if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) { in validateRequest()
ExecutionBurstServer.cpp
56 DefaultBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel) in DefaultBurstExecutorWithCache() argument
57 : mpPreparedModel(preparedModel) {} in DefaultBurstExecutorWithCache()
532 const MQDescriptorSync<FmqResultDatum>& resultChannel, V1_2::IPreparedModel* preparedModel, in create() argument
535 if (preparedModel == nullptr) { in create()
542 std::make_shared<DefaultBurstExecutorWithCache>(preparedModel); in create()
BufferTracker.cpp
61 const IPreparedModel* preparedModel) const { in validateRequest()
71 if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) { in validateRequest()
98 if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) { in validateRequest()
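
Both buffer trackers validate a request the same way: the roles recorded when the device buffer was allocated form a set, and every input or output that references the buffer must find its (preparedModel, IOType, index) triple in it. A minimal sketch of the lookup, assuming a std::set keyed on such triples like the kRoles member above:

    #include <cstdint>
    #include <set>
    #include <tuple>

    struct IPreparedModel {};  // stand-in
    enum class IOType { INPUT, OUTPUT };
    using Role = std::tuple<const IPreparedModel*, IOType, uint32_t>;

    // Returns false when the buffer was never allocated for this role,
    // mirroring kRoles.count({preparedModel, IOType::INPUT, i}) == 0.
    bool validateRole(const std::set<Role>& kRoles,
                      const IPreparedModel* preparedModel, IOType type,
                      uint32_t index) {
        return kRoles.count({preparedModel, type, index}) != 0;
    }

    int main() {
        IPreparedModel pm;
        const std::set<Role> kRoles = {{&pm, IOType::INPUT, 0}};
        // Input 0 of this model may use the buffer; output 0 may not.
        return validateRole(kRoles, &pm, IOType::INPUT, 0) &&
                       !validateRole(kRoles, &pm, IOType::OUTPUT, 0)
                       ? 0
                       : 1;
    }
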
ValidateHal.cpp
884 const auto& preparedModel = preparedModels[role.modelIndex]; in validateMemoryDesc() local
885 NN_RET_CHECK(preparedModel != nullptr); in validateMemoryDesc()
886 const auto* model = getModel(preparedModel); in validateMemoryDesc()
892 const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex); in validateMemoryDesc()
898 const auto& preparedModel = preparedModels[role.modelIndex]; in validateMemoryDesc() local
899 NN_RET_CHECK(preparedModel != nullptr); in validateMemoryDesc()
900 const auto* model = getModel(preparedModel); in validateMemoryDesc()
906 const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex); in validateMemoryDesc()
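
validateMemoryDesc() additionally relies on std::set::emplace to reject duplicates: the returned bool is false when an identical (preparedModel, IOType, ioIndex) triple is already present, which the validator treats as an invalid descriptor. A short sketch of the idiom with stand-in types:

    #include <cstdint>
    #include <set>
    #include <tuple>

    struct IPreparedModel {};  // stand-in
    enum class IOType { INPUT, OUTPUT };
    using Role = std::tuple<const IPreparedModel*, IOType, uint32_t>;

    bool addRoleOnce(std::set<Role>& roles, const IPreparedModel* pm,
                     IOType type, uint32_t ioIndex) {
        // success is false when an identical role is already present,
        // which validateMemoryDesc() reports as a validation failure.
        const auto [it, success] = roles.emplace(pm, type, ioIndex);
        return success;
    }
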
ExecutionBurstController.cpp
485 const sp<V1_2::IPreparedModel>& preparedModel, in create() argument
488 if (preparedModel == nullptr) { in create()
516 const hardware::Return<void> ret = preparedModel->configureExecutionBurst( in create()