/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_INNER_MODEL_H
#define NEURAL_NETWORK_RUNTIME_INNER_MODEL_H

#include <memory>
#include <unordered_map>

#include "mindir.h"
#include "ops_builder.h"
#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
#include "interfaces/kits/c/neural_network_runtime.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
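// Hedged usage sketch (illustrative only, not taken from this repository; the variable
// names below are placeholders). The C API layer typically drives InnerModel like this:
//
//     InnerModel model;
//     model.AddTensor(nnTensor);                       // once per tensor in the graph
//     model.SetTensorValue(index, buffer, length);     // supply data for constant tensors
//     model.AddOperation(opType, paramIndices, inputIndices, outputIndices);
//     model.SpecifyInputsAndOutputs(inputIndices, outputIndices);
//     model.Build();                                   // freeze the model into a LiteGraph
//     auto liteGraph = model.GetLiteGraphs();          // hand the graph to compilation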
class InnerModel {
public:
    InnerModel();

    bool IsBuild() const;
    OH_NN_ReturnCode BuildFromLiteGraph(const mindspore::lite::LiteGraph* liteGraph);
    OH_NN_ReturnCode BuildFromMetaGraph(const void* metaGraph, const Buffer& quantBuffer,
                                        const std::string& modelName);
    OH_NN_ReturnCode AddTensor(const OH_NN_Tensor& nnTensor);
    OH_NN_ReturnCode SetTensorValue(uint32_t index, const void* buffer, size_t length);
    OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType,
                                  const OH_NN_UInt32Array& paramIndices,
                                  const OH_NN_UInt32Array& inputIndices,
                                  const OH_NN_UInt32Array& outputIndices);
    OH_NN_ReturnCode GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount);
    OH_NN_ReturnCode SpecifyInputsAndOutputs(
        const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices);
    OH_NN_ReturnCode SetInputsAndOutputsInfo(const OH_NN_TensorInfo* inputsInfo, size_t inputSize,
        const OH_NN_TensorInfo* outputsInfo, size_t outputSize);
    OH_NN_ReturnCode Build();
    std::vector<std::shared_ptr<NNTensor>> GetInputTensors() const;
    std::vector<std::shared_ptr<NNTensor>> GetOutputTensors() const;
    std::shared_ptr<mindspore::lite::LiteGraph> GetLiteGraphs() const;
    void* GetMetaGraph() const;
    Buffer GetQuantBuffer() const;
    std::string GetModelName() const;

private:
    void AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID);
    OH_NN_ReturnCode AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID);
    OH_NN_ReturnCode ValidateInputAndOutput(
        const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const;
    OH_NN_ReturnCode ValidateTensorArray(const OH_NN_UInt32Array& indices) const;
    OH_NN_ReturnCode CheckParameters() const;

private:
    std::vector<char> m_supportedOperations; // std::vector<bool> does not provide data(), so std::vector<char> is used instead.
    std::vector<uint32_t> m_inputIndices; // Indices into m_allTensors marking the model inputs.
    std::vector<uint32_t> m_outputIndices; // Indices into m_allTensors marking the model outputs.
    std::vector<std::unique_ptr<Ops::OpsBuilder>> m_ops; // One builder per operation added by AddOperation.
    std::vector<std::shared_ptr<NNTensor>> m_allTensors; // All tensors added to the model; AddTensor and SetTensorValue index into this vector.
    std::vector<std::shared_ptr<NNTensor>> m_inputTensors; // Used to pass input tensors to compilation.
    std::vector<std::shared_ptr<NNTensor>> m_outputTensors; // Used to pass output tensors to compilation.
    std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph {nullptr};
    void* m_metaGraph {nullptr};
    Buffer m_quantBuffer;
    std::string m_modelName;
};
}  // namespace NeuralNetworkRuntime
}  // namespace OHOS
#endif // NEURAL_NETWORK_RUNTIME_INNER_MODEL_H