/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H
#define NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H

#include <memory>
#include <vector>

#include "frameworks/native/nn_tensor.h"
#include "interfaces/kits/c/neural_network_runtime_type.h"
#include "prepared_model.h"
#include "device.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
// Pairs a compiled (prepared) model with the device it runs on and drives
// inference on that pair.
class ExecutionPlan {
public:
    ExecutionPlan(std::shared_ptr<PreparedModel> preparedModel, std::shared_ptr<Device> device)
        : m_preparedModel(std::move(preparedModel)),
          m_device(std::move(device)) {}

    // Executes the prepared model: reads inputTensors and writes the results
    // into outputTensors.
    OH_NN_ReturnCode Run(const std::vector<std::shared_ptr<NNTensor>>& inputTensors,
                         std::vector<std::shared_ptr<NNTensor>>& outputTensors);

    // Devices associated with this plan's input and output tensors.
    std::shared_ptr<Device> GetInputDevice() const;
    std::shared_ptr<Device> GetOutputDevice() const;

private:
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::shared_ptr<Device> m_device {nullptr};
};
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H
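
/*
 * Usage sketch (illustrative only, not part of this header). It assumes the
 * caller already holds a PreparedModel and a Device from the compilation
 * pipeline; `compiledModel`, `targetDevice`, and `BuildInputs()` below are
 * hypothetical placeholders, not runtime APIs.
 *
 *   std::shared_ptr<PreparedModel> compiledModel = ...;  // from model compilation
 *   std::shared_ptr<Device> targetDevice = ...;          // device the model was compiled for
 *
 *   OHOS::NeuralNetworkRuntime::ExecutionPlan plan(compiledModel, targetDevice);
 *
 *   std::vector<std::shared_ptr<NNTensor>> inputs = BuildInputs();  // filled input tensors
 *   std::vector<std::shared_ptr<NNTensor>> outputs;  // written by Run(); whether the caller
 *                                                    // must pre-allocate them depends on the
 *                                                    // implementation
 *
 *   OH_NN_ReturnCode ret = plan.Run(inputs, outputs);
 *   if (ret != OH_NN_SUCCESS) {
 *       // handle inference failure
 *   }
 */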