/packages/modules/NeuralNetworks/common/include/nnapi/ |
D | IPreparedModel.h |
    36   class IPreparedModel {
    194  virtual ~IPreparedModel() = default;
    198  IPreparedModel() = default;
    199  IPreparedModel(const IPreparedModel&) = default;
    200  IPreparedModel(IPreparedModel&&) noexcept = default;
    201  IPreparedModel& operator=(const IPreparedModel&) = default;
    202  IPreparedModel& operator=(IPreparedModel&&) noexcept = default;
|
D | Types.h |
    50   class IPreparedModel;  variable
    85   using SharedPreparedModel = std::shared_ptr<const IPreparedModel>;
|
D | IDevice.h | 32 class IPreparedModel; variable
|
D | Validation.h | 69 using PreparedModelRole = std::tuple<const IPreparedModel*, IOType, uint32_t>;
|
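Taken together, these headers pin down how the canonical NNAPI runtime refers to a compiled model: `IPreparedModel` is a polymorphic base with defaulted copy/move for implementers, callers hold it through `SharedPreparedModel`, and `PreparedModelRole` ties a raw prepared-model pointer to one input or output of a memory domain. A minimal sketch of how the aliases compose; the `IOType` enumerators and the `makeOutputRole()` helper are assumptions added for illustration:

```cpp
// Sketch only: the aliases are copied from Types.h / Validation.h above; the IOType
// enumerators and makeOutputRole() are assumptions for illustration.
#include <cstdint>
#include <memory>
#include <tuple>

namespace android::nn {

class IPreparedModel;                 // forward declaration, as in Types.h and IDevice.h
enum class IOType { INPUT, OUTPUT };  // assumed shape of the IOType used by PreparedModelRole

using SharedPreparedModel = std::shared_ptr<const IPreparedModel>;
using PreparedModelRole = std::tuple<const IPreparedModel*, IOType, uint32_t>;

// Hypothetical helper: record that output `index` of `model` participates in a memory domain.
inline PreparedModelRole makeOutputRole(const SharedPreparedModel& model, uint32_t index) {
    return {model.get(), IOType::OUTPUT, index};
}

}  // namespace android::nn
```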
/packages/modules/NeuralNetworks/runtime/test/ |
D | PreparedModelCallback.h |
    91   const sp<V1_0::IPreparedModel>& preparedModel) override;
    117  const sp<V1_2::IPreparedModel>& preparedModel) override;
    145  const sp<V1_3::IPreparedModel>& preparedModel) override;
    186  sp<V1_0::IPreparedModel> getPreparedModel() const;
    197  const sp<V1_0::IPreparedModel>& preparedModel);
    204  sp<V1_0::IPreparedModel> mPreparedModel;
|
D | PreparedModelCallback.cpp |
    22   bool deadObject, ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {  in notifyInternal()
    43   V1_0::ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {  in notify()
    48   V1_0::ErrorStatus errorStatus, const sp<V1_2::IPreparedModel>& preparedModel) {  in notify_1_2()
    53   V1_3::ErrorStatus errorStatus, const sp<V1_3::IPreparedModel>& preparedModel) {  in notify_1_3()
    71   sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {  in getPreparedModel()
|
D | TestExecution.cpp |
    74   class TestPreparedModelLatest : public V1_3::IPreparedModel {
    80   TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)  in TestPreparedModelLatest()
    82   mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),  in TestPreparedModelLatest()
    83   mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),  in TestPreparedModelLatest()
    253  const sp<V1_0::IPreparedModel> mPreparedModelV1_0;
    254  const sp<V1_2::IPreparedModel> mPreparedModelV1_2;
    255  const sp<V1_3::IPreparedModel> mPreparedModelV1_3;
    274  class TestPreparedModel12 : public V1_2::IPreparedModel {
    276  TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)  in TestPreparedModel12()
    306  const sp<V1_3::IPreparedModel> mLatestPreparedModel;
    [all …]
|
D | TestIntrospectionControl.cpp |
    628  class TestPreparedModel12 : public V1_2::IPreparedModel {
    660  const sp<V1_3::IPreparedModel> mLatestPreparedModel;
    664  class TestPreparedModel10 : public V1_0::IPreparedModel {
    675  const sp<V1_3::IPreparedModel> mLatestPreparedModel;
|
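The test callback above implements all three `notify*()` generations and exposes the result through `getPreparedModel()`; `TestExecution.cpp` then upgrades that `V1_0` handle with `castFrom()`. A hedged sketch of that upgrade idiom, assuming the usual generated HIDL headers; only `getPreparedModel()` and the `castFrom(...).withDefault(nullptr)` calls come from the excerpts:

```cpp
// Sketch of the interface-upgrade idiom from TestExecution.cpp: a prepared model handed out
// as V1_0::IPreparedModel is probed for the newer HIDL interfaces; castFrom() yields an empty
// handle when the driver does not implement them, so withDefault(nullptr) keeps the result
// simple to test.
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>

using android::sp;
namespace V1_0 = android::hardware::neuralnetworks::V1_0;
namespace V1_2 = android::hardware::neuralnetworks::V1_2;
namespace V1_3 = android::hardware::neuralnetworks::V1_3;

void probeInterfaces(const sp<V1_0::IPreparedModel>& base) {
    sp<V1_2::IPreparedModel> modelV1_2 = V1_2::IPreparedModel::castFrom(base).withDefault(nullptr);
    sp<V1_3::IPreparedModel> modelV1_3 = V1_3::IPreparedModel::castFrom(base).withDefault(nullptr);
    if (modelV1_3 != nullptr) {
        // 1.3 entry points (execute_1_3, executeFenced, ...) are available.
    } else if (modelV1_2 != nullptr) {
        // Fall back to the 1.2 entry points (execute_1_2, executeSynchronously, ...).
    }
}
```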
/packages/modules/NeuralNetworks/runtime/test/android_fuzzing/ |
D | DriverFuzzTest.cpp |
    253  const sp<V1_0::IPreparedModel>& /*preparedModel*/) override {  in notify() argument
    258  const sp<V1_2::IPreparedModel>& /*preparedModel*/) override {  in notify_1_2() argument
    263  const sp<V1_3::IPreparedModel>& preparedModel) override {  in notify_1_3()
    264  const sp<V1_3::IPreparedModel> result =  in notify_1_3()
    275  const sp<V1_3::IPreparedModel>& getResults() const {  in getResults()
    286  std::optional<const sp<V1_3::IPreparedModel>> mResults;
    289  sp<V1_3::IPreparedModel> prepareModel(const sp<V1_3::IDevice>& device, const V1_3::Model& model) {  in prepareModel()
    296  void execute(const sp<V1_3::IPreparedModel>& preparedModel, const V1_3::Request& request) {  in execute()
|
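`DriverFuzzTest.cpp` wraps the same flow into small `prepareModel()` / `execute()` helpers. A hedged sketch of the execute half; the `ExecutionCallback` class and its `wait()` method are assumptions modeled on the test callbacks elsewhere under runtime/test/, while `execute_1_3()` is the standard 1.3 asynchronous entry point:

```cpp
// Sketch only, assuming the usual V1_x namespace aliases and an ExecutionCallback helper
// (not shown in the excerpt) that blocks in wait() until the driver calls notify_1_3().
void execute(const sp<V1_3::IPreparedModel>& preparedModel, const V1_3::Request& request) {
    sp<ExecutionCallback> callback = new ExecutionCallback();
    preparedModel->execute_1_3(request, V1_2::MeasureTiming::NO, /*deadline=*/{},
                               /*loopTimeoutDuration=*/{}, callback);
    callback->wait();  // block until the asynchronous execution reports its status
}
```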
/packages/modules/NeuralNetworks/common/include/ |
D | AidlValidateHal.h |
    32   using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>;
    36   const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
    39   std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
|
D | ValidateHal.h |
    33   using HalPreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;
    83   const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
    86   std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
|
D | HalBufferTracker.h | 54 const V1_3::IPreparedModel* preparedModel) const;
|
D | BufferTracker.h | 54 const IPreparedModel* preparedModel) const;
|
D | AidlBufferTracker.h | 54 const aidl_hal::IPreparedModel* preparedModel) const;
|
D | ExecutionBurstServer.h | 308 hardware::neuralnetworks::V1_2::IPreparedModel* preparedModel,
|
D | ExecutionBurstController.h | 294 const sp<hardware::neuralnetworks::V1_2::IPreparedModel>& preparedModel,
|
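These headers are the memory-domain and burst plumbing: `validateMemoryDesc()` checks a buffer description against the prepared models it will be used with and collects `HalPreparedModelRole` / `AidlHalPreparedModelRole` tuples, which the buffer trackers later compare against the prepared model named in each request. A hedged usage sketch of the V1_3 variant; only `preparedModels` and the `getModel` functor appear in the excerpt, so the remaining parameters, their order, and the concrete driver accessors are assumptions:

```cpp
// Hedged sketch: how a driver's allocate() might call validateMemoryDesc() from ValidateHal.h.
// Parameter names/order beyond `preparedModels` and `getModel`, as well as the
// castToSamplePreparedModel()/getModel() accessors on the concrete class, are assumptions.
bool checkBufferDesc(const V1_3::BufferDesc& desc,
                     const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
                     const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
                     const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
                     std::set<android::nn::HalPreparedModelRole>* roles) {
    const auto getModel = [](const sp<V1_3::IPreparedModel>& preparedModel) -> const V1_3::Model* {
        // Driver-specific downcast to the concrete class that still owns the V1_3::Model.
        const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel);
        return samplePreparedModel != nullptr ? samplePreparedModel->getModel() : nullptr;
    };
    V1_3::Operand combinedOperand;
    return android::nn::validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles,
                                           getModel, roles, &combinedOperand);
}
```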
/packages/modules/NeuralNetworks/common/ |
D | AidlValidateHal.cpp |
    38   const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,  in validateMemoryDesc()
    41   std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>  in validateMemoryDesc()
|
D | ExecutionBurstServer.cpp |
    56   DefaultBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel)  in DefaultBurstExecutorWithCache()
    107  V1_2::IPreparedModel* const mpPreparedModel;
    526  const MQDescriptorSync<FmqResultDatum>& resultChannel, V1_2::IPreparedModel* preparedModel,  in create()
|
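`ExecutionBurstServer.cpp` shows the serving side of the burst protocol holding a raw `V1_2::IPreparedModel*` (the `mpPreparedModel` member) and executing through it. A hedged sketch of how a driver's `configureExecutionBurst()` typically hands its own prepared model to `ExecutionBurstServer::create()`; `MyPreparedModel` is hypothetical and the exact `create()` overload is an assumption based on the parameters visible at line 526:

```cpp
// Hedged sketch, assuming the usual android::hardware namespace aliases. MyPreparedModel is a
// hypothetical V1_2::IPreparedModel implementation; the create() argument list (callback, FMQ
// channels, raw prepared-model pointer) follows the excerpt but is not copied from the source.
hardware::Return<void> MyPreparedModel::configureExecutionBurst(
        const sp<V1_2::IBurstCallback>& callback,
        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
        configureExecutionBurst_cb cb) {
    // The burst server keeps the raw pointer (mpPreparedModel above) for the lifetime of the burst.
    const sp<V1_2::IBurstContext> burst =
            ExecutionBurstServer::create(callback, requestChannel, resultChannel, this);
    cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst);
    return hardware::Void();
}
```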
/packages/modules/NeuralNetworks/driver/sample/ |
D | SampleDriver.h |
    112  const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
    131  class SamplePreparedModel : public V1_3::IPreparedModel {
|
D | CanonicalPreparedModel.h | 34 class PreparedModel final : public IPreparedModel,
|
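The sample driver shows both implementation styles: `SamplePreparedModel` derives from the HIDL `V1_3::IPreparedModel`, while `CanonicalPreparedModel.h` derives from the canonical `IPreparedModel` of the first group. A hedged skeleton of the HIDL side; only the base class and the V1_0 `execute()` signature are standard, and `mModel` / `getModel()` are illustrative assumptions:

```cpp
// Hedged skeleton of a driver-side prepared model. Only the V1_3::IPreparedModel base and the
// V1_0 execute() signature are standard; getModel() and mModel are assumptions. The full class
// also overrides execute_1_2/execute_1_3, executeSynchronously*, executeFenced and
// configureExecutionBurst, omitted here.
class SamplePreparedModel : public V1_3::IPreparedModel {
  public:
    explicit SamplePreparedModel(V1_3::Model model) : mModel(std::move(model)) {}

    hardware::Return<V1_0::ErrorStatus> execute(
            const V1_0::Request& request,
            const sp<V1_0::IExecutionCallback>& callback) override;

    // Accessor of the kind memory-domain validation needs (see validateMemoryDesc above).
    const V1_3::Model* getModel() const { return &mModel; }

  private:
    V1_3::Model mModel;
};
```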
/packages/modules/NeuralNetworks/driver/sample_aidl/ |
D | SampleDriverUtils.h | 33 const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel);
|
D | SampleDriver.cpp |
    131  const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) {  in castToSamplePreparedModel()
    147  constexpr auto getModel = [](const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel)  in allocate()
    157  std::vector<std::shared_ptr<aidl_hal::IPreparedModel>> preparedModels;  in allocate()
|
D | SampleDriverUtils.cpp | 40 const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) { in notify()
|
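The AIDL sample driver repeats the pattern over NDK-backend types: `castToSamplePreparedModel()` recovers the concrete class from a `std::shared_ptr<aidl_hal::IPreparedModel>`, and `SampleDriverUtils` funnels preparation results through the AIDL callback. A hedged sketch of that `notify()` helper; the callback parameter and the logging are assumptions, since only the `(errorStatus, preparedModel)` tail is visible in the excerpts:

```cpp
// Hedged sketch of the notify() helper from SampleDriverUtils: report the preparation outcome
// through the AIDL IPreparedModelCallback. The callback parameter and LOG(ERROR) handling are
// assumptions around the single parameter shown above.
#include <android-base/logging.h>

void notify(const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback,
            aidl_hal::ErrorStatus errorStatus,
            const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) {
    const ndk::ScopedAStatus ret = callback->notify(errorStatus, preparedModel);
    if (!ret.isOk()) {
        LOG(ERROR) << "Error calling IPreparedModelCallback::notify: " << ret.getDescription();
    }
}
```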
/packages/modules/NeuralNetworks/tools/api/ |
D | Types.t |
    52   class IPreparedModel;
    78   using SharedPreparedModel = std::shared_ptr<const IPreparedModel>;
|
/packages/modules/NeuralNetworks/shim_and_sl/ |
D | ShimDevice.cpp |
    88   const ShimPreparedModel* castToShimPreparedModel(IPreparedModel* preparedModel) {  in castToShimPreparedModel()
    342  constexpr auto getCompilation = [](IPreparedModel* preparedModel) -> const ShimPreparedModel* {  in allocate()
|
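The shim layer performs the same downcast once more so that `allocate()` can reach the compilation behind an `IPreparedModel*`. A hedged sketch around the lambda excerpted at line 342; everything beyond the two listed lines is an assumption:

```cpp
// Hedged sketch: allocate() maps each incoming IPreparedModel* to the shim's own
// ShimPreparedModel via the cast helper from line 88; prepared models that did not come
// from this shim would presumably yield nullptr and be rejected by the caller.
constexpr auto getCompilation = [](IPreparedModel* preparedModel) -> const ShimPreparedModel* {
    return castToShimPreparedModel(preparedModel);
};
```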