/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "inner_model.h"

#include <algorithm> // for std::max_element and std::copy
#include <new>
#include <unordered_map>
#include <vector>

#include "securec.h"

#include "common/utils.h"
#include "common/scoped_trace.h"
#include "device_manager.h"
#include "validation.h"
#include "ops_builder.h"
#include "ops_registry.h"
#include "transform.h"

namespace MSLITE = mindspore::lite;

namespace OHOS {
namespace NeuralNetworkRuntime {
const std::string NNR_MODEL = "NNR_Model";
const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model";

namespace {
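// Custom deleter for std::shared_ptr<MSLITE::LiteGraph>: a LiteGraph must be released
// through MindIR_LiteGraph_Destroy() rather than plain delete.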
class LiteGraphDeleter {
public:
    void operator()(MSLITE::LiteGraph* liteGraph) const
    {
        MindIR_LiteGraph_Destroy(&liteGraph);
    }
};

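// Converts a single LiteGraph tensor into an NNTensor, translating its data type,
// dimensions, quantization parameters, and format from MindSpore Lite types to NNRt types.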
std::shared_ptr<NNTensor> ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)
{
    MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor);
    OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType);
    std::vector<int32_t> msDims = MSLITE::MindIR_Tensor_GetDims(msTensor);
    std::vector<MSLITE::QuantParam> msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor);
    std::vector<QuantParam> nnQuantParams = MSToNN::TransformQuantParams(msQuantParams);
    OH_NN_Format nnFormat = MSToNN::TransformFormat(MSLITE::MindIR_Tensor_GetFormat(msTensor));

    std::shared_ptr<NNTensor> nnTensor = CreateSharedPtr<NNTensor>();
    if (nnTensor == nullptr) {
        LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor.");
        return nullptr;
    }

    OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes.");
        return nullptr;
    }

    nnTensor->SetFormat(nnFormat);

    return nnTensor;
}

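// Builds one NNTensor per index in `indices`, checking first that every index falls
// inside liteGraph->all_tensors_.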
OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
                                                 const std::vector<uint32_t>& indices,
                                                 std::vector<std::shared_ptr<NNTensor>>& nnTensors)
{
    if (indices.empty()) {
        LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list.");
        return OH_NN_INVALID_PARAMETER;
    }

    uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end()));
    if (maximumIndex >= liteGraph->all_tensors_.size()) {
        LOGE("ConstructNNTensorsFromLiteGraph failed, index exceeds the size of all_tensors inside liteGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> nnTensor;
    for (uint32_t i : indices) {
        nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]);
        if (nnTensor == nullptr) {
            LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor.");
            return OH_NN_NULL_PTR;
        }

        nnTensors.emplace_back(nnTensor);
    }

    return OH_NN_SUCCESS;
}
} // anonymous namespace

InnerModel::InnerModel() {}

bool InnerModel::IsBuild() const
{
    return ((m_liteGraph != nullptr) || (m_metaGraph != nullptr));
}

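// Adopts a LiteGraph built outside of this InnerModel and derives the model's input and
// output NNTensors from the graph's input/output indices. The model must still be empty.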
OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph)
{
    NNRT_TRACE_NAME("Build model from lite graph");
    if (liteGraph == nullptr) {
        LOGE("BuildFromLiteGraph failed, passed empty liteGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (IsBuild()) {
        LOGE("BuildFromLiteGraph failed, inner model has been built or loaded before.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_allTensors.empty() || !m_ops.empty()) {
        LOGE("BuildFromLiteGraph failed, please call BuildFromLiteGraph without adding tensors and operations.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    m_inputTensors.clear();
    OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph.");
        return ret;
    }

    m_outputTensors.clear();
    ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
        return ret;
    }

    m_liteGraph.reset(const_cast<MSLITE::LiteGraph*>(liteGraph), LiteGraphDeleter());
    m_liteGraph->name_ = LOADED_NNR_MODEL;

    return OH_NN_SUCCESS;
}

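// Stores an externally built meta graph along with its quantization buffer and model
// name. SetInputsAndOutputsInfo() must have populated m_allTensors beforehand.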
OH_NN_ReturnCode InnerModel::BuildFromMetaGraph(
    const void* metaGraph, const Buffer& quantBuffer, const std::string& modelName)
{
    NNRT_TRACE_NAME("Build model from meta graph");
    if (metaGraph == nullptr) {
        LOGE("BuildFromMetaGraph failed, passed empty metaGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (IsBuild()) {
        LOGE("BuildFromMetaGraph failed, inner model has been built or loaded before.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_allTensors.empty()) {
        LOGE("BuildFromMetaGraph failed, SetInputsAndOutputsInfo should be called before building metaGraph.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    m_metaGraph = const_cast<void*>(metaGraph);
    m_quantBuffer = quantBuffer;
    m_modelName = modelName;

    return OH_NN_SUCCESS;
}

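// Appends one tensor, described by the public OH_NN_Tensor struct, to m_allTensors.
// Tensors can only be added before the model is built.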
OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
{
    if (IsBuild()) {
        LOGE("AddTensor failed, AddTensor is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
    if (tensor == nullptr) {
        LOGE("AddTensor failed, error happened when creating NNTensor.");
        return OH_NN_MEMORY_ERROR;
    }

    OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddTensor failed, error happened when building NNTensor from OH_NN_Tensor.");
        return ret;
    }

    // The NNTensor is named "Tensor: <tensor index>".
    tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
    m_allTensors.emplace_back(tensor);

    return OH_NN_SUCCESS;
}

// TODO: Reduce the cyclomatic complexity of this function.
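// Copies `length` bytes from `buffer` into a freshly allocated block and hands ownership
// to the tensor. Only static-shape tensors that have no value yet accept data.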
OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
{
    if (IsBuild()) {
        LOGE("SetTensorValue failed, SetTensorValue is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (index >= m_allTensors.size()) {
        LOGE("SetTensorValue failed, passed index %u out of the number of added tensors.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    const std::shared_ptr<NNTensor> tensor = m_allTensors[index];
    if (tensor->GetBuffer() != nullptr) {
        LOGE("SetTensorValue failed, the tensor's value has already been set. Tensor index: %u.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    if (buffer == nullptr) {
        LOGW("SetTensorValue passed empty buffer, which has no effect.");
        return OH_NN_SUCCESS;
    }

    if (tensor->IsDynamicShape()) {
        LOGE("SetTensorValue failed, cannot set value to tensor with dynamic shape.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (length != tensor->GetDataLength()) {
        LOGE("SetTensorValue failed, got buffer length %zu different from the byte size of tensor %zu.",
             length, tensor->GetDataLength());
        return OH_NN_INVALID_PARAMETER;
    }

    // Data will be released inside NNTensor if it is set inside NNTensor using SetBuffer().
    void* data = new (std::nothrow) char[length];
    if (data == nullptr) {
        LOGE("SetTensorValue failed, please check whether it runs out of memory.");
        return OH_NN_MEMORY_ERROR;
    }

    errno_t ret = memcpy_s(data, length, buffer, length);
    if (ret != EOK) {
        LOGE("SetTensorValue failed, please check the error number %d returned by memcpy_s.", ret);
        delete[] reinterpret_cast<char*>(data);
        return OH_NN_FAILED;
    }

    tensor->SetBuffer(data, length);
    return OH_NN_SUCCESS;
}

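// Checks that the input and output index arrays are non-empty, reference tensors of type
// OH_NN_TENSOR, and do not mark any tensor as both an input and an output.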
OH_NN_ReturnCode InnerModel::ValidateInputAndOutput(
    const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const
{
    OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateInputAndOutput failed, please check input indices.");
        return ret;
    }

    ret = ValidateTensorArray(outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateInputAndOutput failed, please check output indices.");
        return ret;
    }

    if (inputIndices.size == 0) {
        LOGE("ValidateInputAndOutput failed, passed empty input indices.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (outputIndices.size == 0) {
        LOGE("ValidateInputAndOutput failed, passed empty output indices.");
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> tensor{nullptr};
    for (uint32_t i = 0; i < inputIndices.size; i++) {
        tensor = m_allTensors[inputIndices.data[i]];
        if (tensor->GetType() != OH_NN_TENSOR) {
            LOGE("ValidateInputAndOutput failed, a tensor set as input should have type OH_NN_TENSOR, "
                 "but received %d. Tensor index: %u.", tensor->GetType(), i);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    for (uint32_t i = 0; i < outputIndices.size; i++) {
        tensor = m_allTensors[outputIndices.data[i]];
        if (tensor->GetType() != OH_NN_TENSOR) {
            LOGE("ValidateInputAndOutput failed, a tensor set as output should have type OH_NN_TENSOR, "
                 "but received %d. Tensor index: %u.", tensor->GetType(), i);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    // The numbers of inputIndices and outputIndices are usually small, so O(n^2) iteration is fine.
    for (uint32_t i = 0; i < inputIndices.size; i++) {
        for (uint32_t j = 0; j < outputIndices.size; j++) {
            if (inputIndices.data[i] == outputIndices.data[j]) {
                LOGE("ValidateInputAndOutput failed, a tensor should not be set as input and output at the same time, "
                     "input index %u, output index %u.", inputIndices.data[i], outputIndices.data[j]);
                return OH_NN_INVALID_PARAMETER;
            }
        }
    }
    return OH_NN_SUCCESS;
}

/* Check whether the indices exceed the number of added tensors. */
OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const
{
    OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateTensorArray failed, please check the validity of indices.");
        return ret;
    }

    size_t allTensorsSize = m_allTensors.size();
    for (uint32_t i = 0; i < indices.size; i++) {
        if (indices.data[i] >= allTensorsSize) {
            LOGE("ValidateTensorArray failed, index %u is out of the number of added tensors.", indices.data[i]);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    return OH_NN_SUCCESS;
}

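// Adds one operation: validates its parameter, input, and output indices, fetches the
// matching OpsBuilder from the registry, and builds it against m_allTensors.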
OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices,
                                          const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
{
    if (IsBuild()) {
        LOGE("AddOperation failed, AddOperation is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, please check inputIndices and outputIndices.");
        return ret;
    }
    std::vector<uint32_t> inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
    std::vector<uint32_t> outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size);

    ret = ValidateTensorArray(paramIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, please check paramIndices.");
        return ret;
    }
    std::vector<uint32_t> parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size);

    Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton();
    std::unique_ptr<Ops::OpsBuilder> opsBuilder = opsRegistry.GetOpsBuilder(opType);
    if (opsBuilder == nullptr) {
        LOGE("AddOperation failed, cannot add operation of type: %d.", opType);
        return OH_NN_INVALID_PARAMETER;
    }

    ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, error happened when building the operation.");
        return ret;
    }

    m_ops.emplace_back(std::move(opsBuilder));
    return OH_NN_SUCCESS;
}

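// Records which previously added tensors serve as the model's inputs and outputs.
// May be called only once, and only before the model is built.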
OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs(
    const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
{
    if (IsBuild()) {
        LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_inputTensors.empty()) {
        LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices.");
        return ret;
    }

    m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
    m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size);

    for (uint32_t i : m_inputIndices) {
        m_inputTensors.emplace_back(m_allTensors[i]);
    }

    for (uint32_t i : m_outputIndices) {
        m_outputTensors.emplace_back(m_allTensors[i]);
    }

    return OH_NN_SUCCESS;
}

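// Verifies that the model is still empty (no graph, tensors, or input/output bindings)
// before SetInputsAndOutputsInfo() populates it.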
OH_NN_ReturnCode InnerModel::CheckParameters() const
{
    if (m_liteGraph != nullptr) {
        LOGE("CheckParameters failed, liteGraph is not nullptr.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_metaGraph != nullptr) {
        LOGE("CheckParameters failed, metaGraph is not nullptr.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_allTensors.empty()) {
        LOGE("CheckParameters failed, m_allTensors is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!(m_inputTensors.empty() && (m_inputIndices.empty()))) {
        LOGE("CheckParameters failed, m_inputTensors or m_inputIndices is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!(m_outputTensors.empty() && (m_outputIndices.empty()))) {
        LOGE("CheckParameters failed, m_outputTensors or m_outputIndices is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    return OH_NN_SUCCESS;
}

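// Creates input and output NNTensors directly from OH_NN_TensorInfo descriptions. Inputs
// occupy indices [0, inputSize) of m_allTensors and outputs the following outputSize
// slots; used together with BuildFromMetaGraph().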
OH_NN_ReturnCode InnerModel::SetInputsAndOutputsInfo(const OH_NN_TensorInfo* inputsInfo, size_t inputSize,
    const OH_NN_TensorInfo* outputsInfo, size_t outputSize)
{
    OH_NN_ReturnCode ret = CheckParameters();
    if (ret != OH_NN_SUCCESS) {
        LOGE("SetInputsAndOutputsInfo failed, error happened when checking parameters.");
        return ret;
    }

    // Set the input NNTensors according to inputsInfo.
    for (size_t i = 0; i < inputSize; ++i) {
        std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
        if (tensor == nullptr) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when creating input NNTensor.");
            return OH_NN_MEMORY_ERROR;
        }

        ret = tensor->BuildFromOHNNTensorInfo(inputsInfo[i]);
        if (ret != OH_NN_SUCCESS) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when building input NNTensor from info.");
            return ret;
        }
        m_inputIndices.emplace_back(i);
        m_allTensors.emplace_back(tensor);
        m_inputTensors.emplace_back(tensor);
    }

    // Set the output NNTensors according to outputsInfo.
    for (size_t i = 0; i < outputSize; ++i) {
        std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
        if (tensor == nullptr) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when creating output NNTensor.");
            return OH_NN_MEMORY_ERROR;
        }

        ret = tensor->BuildFromOHNNTensorInfo(outputsInfo[i]);
        if (ret != OH_NN_SUCCESS) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when building output NNTensor from info.");
            return ret;
        }
        m_outputIndices.emplace_back(i + inputSize);
        m_allTensors.emplace_back(tensor);
        m_outputTensors.emplace_back(tensor);
    }

    return OH_NN_SUCCESS;
}

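// Converts the tensors and operations accumulated through the composition APIs into a
// MindSpore Lite LiteGraph whose nodes are wrapped in a single "NNRt_SubGraph".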
OH_NN_ReturnCode InnerModel::Build()
{
    NNRT_TRACE_NAME("Build model");
    if (IsBuild()) {
        LOGE("Build failed, OH_NNModel_Finish() shouldn't be called after OH_NNModel_Finish() or "
             "OH_NNModel_BuildFromMetaGraph() or OH_NNModel_BuildFromLiteGraph().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_allTensors.empty()) {
        LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_ops.empty()) {
        LOGE("Build failed, no operation has been added. Must call AddOperation before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if ((m_inputIndices.empty()) || (m_outputIndices.empty())) {
        LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph();
    if (pLiteGraph == nullptr) {
        LOGE("Build failed, error happened when creating LiteGraph.");
        return OH_NN_MEMORY_ERROR;
    }
    m_liteGraph.reset(pLiteGraph, LiteGraphDeleter());

    m_liteGraph->name_ = NNR_MODEL;

    std::unordered_map<uint32_t, uint32_t> modelIDToGraphID;
    AddTensorsToLiteGraph(modelIDToGraphID);

    OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // subGraph will be released by LiteGraph if it is added into the LiteGraph instance.
    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
    if (subGraph == nullptr) {
        LOGE("Build failed, error happened when creating subgraph.");
        return OH_NN_NULL_PTR;
    }

    subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph
    subGraph->input_indices_ = m_liteGraph->input_indices_;
    subGraph->output_indices_ = m_liteGraph->output_indices_;
    uint32_t nodeCount = static_cast<uint32_t>(m_ops.size()); // m_ops.size() is smaller than UINT32_MAX
    for (uint32_t i = 0; i < nodeCount; i++) {
        subGraph->node_indices_.emplace_back(i);
    }
    m_liteGraph->sub_graphs_.emplace_back(subGraph);

    return OH_NN_SUCCESS;
}

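// Copies every non-parameter NNTensor into the LiteGraph and records the mapping from
// model tensor index to graph tensor index; the two numbering schemes diverge because
// parameter tensors are skipped.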
void InnerModel::AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    uint32_t graphID = 0;
    LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor);
    size_t tensorCount = m_allTensors.size();
    for (size_t i = 0; i < tensorCount; i++) {
        const std::shared_ptr<NNTensor>& nnTensor = m_allTensors[i];
        // If the tensor is used as an operation parameter, it is not converted to a LiteGraph tensor.
        if (nnTensor->IsOpParameter()) {
            continue;
        }

        tensor = nnTensor->ConvertToLiteGraphTensor();
        m_liteGraph->all_tensors_.emplace_back(tensor.release());
        modelIDToGraphID[i] = graphID++;
    }

    // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputsAndOutputs(),
    // so there is no need to check them twice.
    std::vector<uint32_t>& inputIndices = m_liteGraph->input_indices_;
    for (uint32_t index : m_inputIndices) {
        inputIndices.emplace_back(modelIDToGraphID.at(index));
    }

    std::vector<uint32_t>& outputIndices = m_liteGraph->output_indices_;
    for (uint32_t index : m_outputIndices) {
        outputIndices.emplace_back(modelIDToGraphID.at(index));
    }
}

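// Converts each OpsBuilder in m_ops into a LiteGraph node, remapping tensor indices
// through modelIDToGraphID and attaching the primitive produced by the builder.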
OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    MSLITE::LiteGraph::Node* node{nullptr};
    size_t opCount = m_ops.size();
    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
    for (size_t i = 0; i < opCount; i++) {
        std::unique_ptr<Ops::OpsBuilder>& op = m_ops[i];
        // node will be released by LiteGraph if it is added into the LiteGraph instance.
        node = new (std::nothrow) MSLITE::LiteGraph::Node();
        if (node == nullptr) {
            LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph node.");
            return OH_NN_NULL_PTR;
        }

        node->name_ = op->GetName() + ":" + std::to_string(i);
        node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType());

        op->GetInputIndex(node->input_indices_, modelIDToGraphID);
        op->GetOutputIndex(node->output_indices_, modelIDToGraphID);

        primitive = op->GetPrimitive();
        if (primitive == nullptr) {
            LOGE("Build %s primitive failed.", op->GetName().c_str());
            delete node;
            return OH_NN_FAILED;
        }

        node->primitive_ = primitive.release();
        m_liteGraph->all_nodes_.emplace_back(node);
    }

    return OH_NN_SUCCESS;
}

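// Asks the device identified by deviceID which operations of the built LiteGraph it
// supports. The returned bool array points into m_supportedOperations and remains valid
// only as long as this InnerModel instance.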
OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount)
{
    if (m_liteGraph == nullptr) {
        LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    DeviceManager& deviceManager = DeviceManager::GetInstance();

    std::shared_ptr<Device> device = deviceManager.GetDevice(deviceID);
    if (device == nullptr) {
        LOGE("GetSupportedOperations failed, failed to retrieve the device.");
        return OH_NN_FAILED;
    }

    std::vector<bool> supportedOperations;
    OH_NN_ReturnCode ret = device->GetSupportedOperation(m_liteGraph, supportedOperations);
    if (ret != OH_NN_SUCCESS) {
        LOGE("GetSupportedOperations failed, error happened when getting supported operations from the device.");
        return ret;
    }

    m_supportedOperations.clear();
    std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations));

    *isSupported = reinterpret_cast<bool*>(m_supportedOperations.data());
    opCount = m_supportedOperations.size();

    return OH_NN_SUCCESS;
}

std::shared_ptr<MSLITE::LiteGraph> InnerModel::GetLiteGraphs() const
{
    return m_liteGraph;
}

std::vector<std::shared_ptr<NNTensor>> InnerModel::GetInputTensors() const
{
    return m_inputTensors;
}

std::vector<std::shared_ptr<NNTensor>> InnerModel::GetOutputTensors() const
{
    return m_outputTensors;
}

void* InnerModel::GetMetaGraph() const
{
    return m_metaGraph;
}

Buffer InnerModel::GetQuantBuffer() const
{
    return m_quantBuffer;
}

std::string InnerModel::GetModelName() const
{
    return m_modelName;
}
}  // namespace NeuralNetworkRuntime
}  // namespace OHOS
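
/*
 * A minimal usage sketch (illustration only, not part of this file's API surface): the
 * composition methods above are typically driven in this order, e.g. by the OH_NNModel
 * C API. The tensor descriptors, weight buffer, and index arrays below are hypothetical.
 *
 *     InnerModel model;
 *     model.AddTensor(inputTensorDesc);                    // one OH_NN_Tensor per tensor
 *     model.AddTensor(weightTensorDesc);
 *     model.AddTensor(outputTensorDesc);
 *     model.SetTensorValue(1, weightData, weightLength);   // constant tensors get values
 *     model.AddOperation(OH_NN_OPS_ADD, params, inputs, outputs);
 *     model.SpecifyInputsAndOutputs(inputIndices, outputIndices);
 *     model.Build();                                       // converts to a LiteGraph
 */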