/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef LITE_NNRT_MODEL_KERNEL_H
#define LITE_NNRT_MODEL_KERNEL_H
#include <vector>
#include <queue>
#include <map>
#include <utility>
#include "include/api/kernel.h"
#include "neural_network_runtime/neural_network_runtime.h"
#include "src/common/log_adapter.h"
#include "src/litert/inner_context.h"
#include "include/errorcode.h"

namespace mindspore {

class NNRTModelKernel : public kernel::Kernel {
  /**
   * NNRT cannot run a single op, only a whole model, so the whole model is wrapped into one kernel.
   */
 public:
  NNRTModelKernel(OH_NNExecutor *oh_nn_executor, lite::NNRtDeviceInfo nnrt_device_info,
                  const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs)
      : kernel::Kernel(inputs, outputs, nullptr, nullptr),
        oh_nn_executor_(oh_nn_executor),
        nnrt_device_info_(nnrt_device_info) {}
  int Prepare() override;
  int Execute() override;
  int ReSize() override;
  // Bind the kernel's input/output tensors to the NNRT executor.
  int SetInputs();
  int SetOutputs();
  // Release the NN_Tensor / NN_TensorDesc handles created for inputs and outputs.
  void FreeNNTensor();
  ~NNRTModelKernel() override {
    // In zero-copy mode the tensor buffers are owned externally, so they must not be freed here.
    if (!zero_copy_) {
      FreeNNTensor();
    }
    MS_LOG(INFO) << "NNRTModelKernel Destroy.";
  }

 protected:
  OH_NNExecutor *oh_nn_executor_ = nullptr;
  lite::NNRtDeviceInfo nnrt_device_info_;
  std::vector<NN_Tensor *> nn_input_tensors_;
  std::vector<NN_TensorDesc *> nn_input_tensor_descs_;
  std::vector<NN_Tensor *> nn_output_tensors_;
  std::vector<NN_TensorDesc *> nn_output_tensor_descs_;

 private:
  bool zero_copy_{false};
};
}  // namespace mindspore

#endif  // LITE_NNRT_MODEL_KERNEL_H
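
/*
 * A minimal usage sketch, assuming the standard kernel::Kernel lifecycle
 * (Prepare once after construction, then Execute per inference) and that the
 * caller already owns a valid OH_NNExecutor plus matching input/output
 * MSTensors. The variable names below (executor, device_info, ins, outs) are
 * illustrative and not part of this header.
 *
 *   mindspore::NNRTModelKernel kernel(executor, device_info, ins, outs);
 *   if (kernel.Prepare() != mindspore::lite::RET_OK) {
 *     MS_LOG(ERROR) << "NNRT kernel prepare failed.";
 *     return;
 *   }
 *   // ... fill the data buffers of `ins` ...
 *   if (kernel.Execute() != mindspore::lite::RET_OK) {
 *     MS_LOG(ERROR) << "NNRT kernel execute failed.";
 *     return;
 *   }
 *   // ... read results from the data buffers of `outs` ...
 */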