/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_H
#define NEURAL_NETWORK_RUNTIME_EXECUTOR_H

#include <memory>
#include <unordered_map>
#include <vector>

#include "compilation.h"
#include "execution_plan.h"
#include "nn_tensor.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "device.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
// Executes a compiled model: binds input/output buffers or shared memory and runs inference.
class Executor {
public:
    explicit Executor(const Compilation* compilation);
    ~Executor();

    // Bind user-provided buffers or shared memory to the model's inputs and outputs.
    OH_NN_ReturnCode SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length);
    OH_NN_ReturnCode SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory);
    OH_NN_ReturnCode SetOutput(uint32_t index, void* buffer, size_t length);
    OH_NN_ReturnCode SetOutputFromMemory(uint32_t index, const OH_NN_Memory& memory);
    // Query the dimensions of an output tensor after Run() has completed.
    OH_NN_ReturnCode GetOutputShape(uint32_t index, int32_t** dimensions, uint32_t& dimensionCount);

    // Create and destroy shared memory blocks for the indexed input or output.
    OH_NN_ReturnCode CreateInputMemory(uint32_t index, size_t length, OH_NN_Memory** memory);
    OH_NN_ReturnCode CreateOutputMemory(uint32_t index, size_t length, OH_NN_Memory** memory);
    OH_NN_ReturnCode DestroyInputMemory(uint32_t index, OH_NN_Memory** memory);
    OH_NN_ReturnCode DestroyOutputMemory(uint32_t index, OH_NN_Memory** memory);

    // Run inference on the underlying execution plan.
    OH_NN_ReturnCode Run();

private:
    OH_NN_ReturnCode BuildInputTensor(uint32_t index, const OH_NN_Tensor& nnTensor,
                                      std::shared_ptr<NNTensor> inputTensor) const;
    OH_NN_ReturnCode SetInputTensorWithCurrentBuffer(uint32_t index, std::shared_ptr<NNTensor> inputTensor,
                                                     const void* buffer, size_t dataLength, size_t curBufferLength);
    void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr<NNTensor> inputTensor,
                                     const void* inputBuffer, size_t length, bool isInnerMem);

private:
    // Pairs an NNTensor with the user buffer bound to it and a flag for internally owned memory.
    struct ExeTensor {
        std::shared_ptr<NNTensor> tensor;
        void* userBuffer;
        size_t userBufferLength;
        bool isInnerMem;
    };
    bool m_isRun {false};
    std::vector<std::shared_ptr<NNTensor>> m_modelInputs;
    std::vector<std::shared_ptr<NNTensor>> m_modelOutputs;
    std::shared_ptr<ExecutionPlan> m_executionPlan {nullptr};
    std::unordered_map<int, std::vector<int32_t>> m_outputDimensions;
    std::unordered_map<int, ExeTensor> m_inputTensors;
    std::unordered_map<int, ExeTensor> m_outputTensors;
    std::unordered_map<int, std::vector<void*>> m_inputCreatedMem;
    std::unordered_map<int, std::vector<void*>> m_outputCreatedMem;
};
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // NEURAL_NETWORK_RUNTIME_EXECUTOR_H
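
A minimal usage sketch, inferred only from the declarations above and not part of the header itself. The RunOnce helper, its parameters, and the assumption that index 0 is a valid input and output are illustrative; constructing the Compilation and the OH_NN_Tensor descriptor is out of scope here.

// Hypothetical example: drive one inference through the Executor declared above.
#include "executor.h"

using namespace OHOS::NeuralNetworkRuntime;

OH_NN_ReturnCode RunOnce(const Compilation* compilation, const OH_NN_Tensor& inputDesc,
                         const void* inputData, size_t inputLength,
                         void* outputBuffer, size_t outputLength)
{
    Executor executor(compilation);

    // Bind the caller's input buffer to input 0.
    OH_NN_ReturnCode ret = executor.SetInput(0, inputDesc, inputData, inputLength);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // Bind the caller's output buffer to output 0.
    ret = executor.SetOutput(0, outputBuffer, outputLength);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // Execute inference on the compiled execution plan.
    ret = executor.Run();
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // Query the (possibly dynamic) shape of output 0 after the run.
    int32_t* dimensions = nullptr;
    uint32_t dimensionCount = 0;
    return executor.GetOutputShape(0, &dimensions, dimensionCount);
}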