/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "inner_model.h"

#include <algorithm>
#include <iterator>
#include <new>
#include <unordered_map>
#include <vector>

#include "securec.h"

#include "utils.h"
#include "scoped_trace.h"
#include "backend_manager.h"
#include "validation.h"
#include "ops_builder.h"
#include "ops_registry.h"
#include "transform.h"
#include "nnbackend.h"

namespace MSLITE = mindspore::lite;

namespace OHOS {
namespace NeuralNetworkRuntime {
const std::string NNR_MODEL = "NNR_Model";
const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model";
const size_t INPUT_OUTPUT_MAX_INDICES = 200;

namespace {
class LiteGraphDeleter {
public:
    void operator()(MSLITE::LiteGraph* liteGraph) const
    {
        MindIR_LiteGraph_Destroy(&liteGraph);
    }
};

std::shared_ptr<NNTensor> ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)
{
    MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor);
    OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType);
    std::vector<int32_t> msDims = MSLITE::MindIR_Tensor_GetDims(msTensor);
    std::vector<MSLITE::QuantParam> msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor);
    std::vector<QuantParam> nnQuantParams = MSToNN::TransformQuantParams(msQuantParams);
    OH_NN_Format nnFormat = MSToNN::TransformFormat(MSLITE::MindIR_Tensor_GetFormat(msTensor));

    std::shared_ptr<NNTensor> nnTensor = CreateSharedPtr<NNTensor>();
    if (nnTensor == nullptr) {
        LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor.");
        return nullptr;
    }

    OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes.");
        return nullptr;
    }

    nnTensor->SetFormat(nnFormat);

    return nnTensor;
}

OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
                                                 const std::vector<uint32_t>& indices,
                                                 std::vector<std::shared_ptr<NNTensor>>& nnTensors)
{
    if ((indices.empty()) || (indices.size() > INPUT_OUTPUT_MAX_INDICES)) {
        LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty or oversized indices list.");
        return OH_NN_INVALID_PARAMETER;
    }

    uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end()));
    if (maximumIndex >= liteGraph->all_tensors_.size()) {
        LOGE("ConstructNNTensorsFromLiteGraph failed, index exceeds the size of all_tensors inside liteGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> nnTensor;
    for (uint32_t i : indices) {
        nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]);
        if (nnTensor == nullptr) {
            LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor.");
            return OH_NN_NULL_PTR;
        }

        nnTensors.emplace_back(nnTensor);
    }

    return OH_NN_SUCCESS;
}
} // anonymous namespace

InnerModel::InnerModel() {}

bool InnerModel::IsBuild() const
{
    return ((m_liteGraph != nullptr) || (m_metaGraph != nullptr));
}

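// Takes over an externally constructed LiteGraph as the inner model: input and output NNTensors are
// rebuilt from the graph's tensor list, and the graph is owned (and later destroyed) by m_liteGraph.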
OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
    const ExtensionConfig& extensionConfig)
{
    NNRT_TRACE_NAME("Build model from lite graph");
    if (liteGraph == nullptr) {
        LOGE("BuildFromLiteGraph failed, passed empty liteGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (IsBuild()) {
        LOGE("BuildFromLiteGraph failed, inner model has been built or loaded before.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_allTensors.empty() || !m_ops.empty()) {
        LOGE("BuildFromLiteGraph failed, tensors and operations should not be added before building from liteGraph.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    m_inputTensors.clear();
    OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph.");
        return ret;
    }

    m_outputTensors.clear();
    ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
        return ret;
    }

    m_liteGraph.reset(const_cast<MSLITE::LiteGraph*>(liteGraph), LiteGraphDeleter());
    m_liteGraph->name_ = LOADED_NNR_MODEL;

    m_extensionConfig = extensionConfig;

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::BuildFromMetaGraph(const void* metaGraph, const ExtensionConfig& extensionConfig)
{
    NNRT_TRACE_NAME("Build model from meta graph");
    if (metaGraph == nullptr) {
        LOGE("BuildFromMetaGraph failed, passed empty metaGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (IsBuild()) {
        LOGE("BuildFromMetaGraph failed, inner model has been built or loaded before.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_allTensors.empty()) {
        LOGE("BuildFromMetaGraph failed, SetInputsAndOutputsInfo should be called before building metaGraph.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    m_metaGraph = const_cast<void*>(metaGraph);
    m_extensionConfig = extensionConfig;
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
{
    if (IsBuild()) {
        LOGE("AddTensor failed, AddTensor is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
    if (tensor == nullptr) {
        LOGE("AddTensor failed, error happened when creating NNTensor.");
        return OH_NN_MEMORY_ERROR;
    }

    OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddTensor failed, error happened when building NNTensor from OH_NN_Tensor.");
        return ret;
    }

    // The NNTensor is named as "Tensor: <tensor index>".
    tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
    m_allTensors.emplace_back(tensor);

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::AddTensorDesc(const NN_TensorDesc* nnTensorDesc)
{
    if (nnTensorDesc == nullptr) {
        LOGE("AddTensorDesc failed, passed nullptr to nnTensorDesc.");
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
    if (tensor == nullptr) {
        LOGE("AddTensorDesc failed, error happened when creating NNTensor.");
        return OH_NN_MEMORY_ERROR;
    }

    OH_NN_ReturnCode returnCode = tensor->BuildFromTensorDesc(nnTensorDesc);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("AddTensorDesc failed, error happened when building NNTensor from NN_TensorDesc.");
        return returnCode;
    }

    // The NNTensor is named as "Tensor: <tensor index>".
    tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
    m_allTensors.emplace_back(tensor);

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::SetTensorType(uint32_t index, OH_NN_TensorType tensorType)
{
    if (IsBuild()) {
        LOGE("SetTensorType failed, SetTensorType is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (index >= m_allTensors.size()) {
        LOGE("SetTensorType failed, passed index %u out of the number of added tensors.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> tensor = m_allTensors[index];
    OH_NN_ReturnCode returnCode = tensor->SetTensorType(tensorType);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("SetTensorType failed, error happened when setting tensor type.");
    }

    return returnCode;
}

OH_NN_ReturnCode InnerModel::SetTensorQuantParam(uint32_t index, const NN_QuantParam* quantParam)
{
    if (IsBuild()) {
        LOGE("SetTensorQuantParam failed, SetTensorQuantParam is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (index >= m_allTensors.size()) {
        LOGE("SetTensorQuantParam failed, passed index %u out of the number of added tensors.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    std::shared_ptr<NNTensor> tensor = m_allTensors[index];
    // quantParam is validated in the outer function, no need to check it here.
    OH_NN_ReturnCode returnCode = tensor->SetQuantParam(quantParam);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("SetTensorQuantParam failed, error happened when setting quant param.");
    }

    return returnCode;
}

// TODO: reduce cyclomatic complexity
OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
{
    if (IsBuild()) {
        LOGE("SetTensorValue failed, SetTensorValue is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (index >= m_allTensors.size()) {
        LOGE("SetTensorValue failed, passed index %u out of the number of added tensors.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    const std::shared_ptr<NNTensor> tensor = m_allTensors[index];
    if (tensor->GetBuffer() != nullptr) {
        LOGE("SetTensorValue failed, tensor has been set value twice. Tensor index: %u.", index);
        return OH_NN_INVALID_PARAMETER;
    }

    if (buffer == nullptr) {
        LOGW("SetTensorValue passed empty buffer, which makes no effect.");
        return OH_NN_SUCCESS;
    }

    if (tensor->IsDynamicShape()) {
        LOGE("SetTensorValue failed, cannot set value to tensor with dynamic shape.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (length != tensor->GetDataLength()) {
        LOGE("SetTensorValue failed, buffer length %zu differs from the byte size of the tensor %zu.",
             length, tensor->GetDataLength());
        return OH_NN_INVALID_PARAMETER;
    }

    // Data will be released inside NNTensor if it is set inside NNTensor using SetBuffer().
    void* data = new (std::nothrow) char[length];
    if (data == nullptr) {
        LOGE("SetTensorValue failed, please check whether it runs out of memory.");
        return OH_NN_MEMORY_ERROR;
    }

    errno_t ret = memcpy_s(data, length, buffer, length);
    if (ret != EOK) {
        LOGE("SetTensorValue failed, please check the error number %d returned by memcpy_s.", ret);
        delete [] reinterpret_cast<char*>(data);
        return OH_NN_FAILED;
    }

    tensor->SetBuffer(data, length);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::ValidateInputAndOutput(
    const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const
{
    if (inputIndices.size == 0 || inputIndices.size > INPUT_OUTPUT_MAX_INDICES) {
        LOGE("ValidateInputAndOutput failed, passed empty or oversized input indices.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (outputIndices.size == 0 || outputIndices.size > INPUT_OUTPUT_MAX_INDICES) {
        LOGE("ValidateInputAndOutput failed, passed empty or oversized output indices.");
        return OH_NN_INVALID_PARAMETER;
    }

    OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateInputAndOutput failed, please check input indices.");
        return ret;
    }

    ret = ValidateTensorArray(outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateInputAndOutput failed, please check output indices.");
        return ret;
    }

    std::shared_ptr<NNTensor> tensor{nullptr};
    for (uint32_t i = 0; i < inputIndices.size; i++) {
        tensor = m_allTensors[inputIndices.data[i]];
        if (tensor->GetType() != OH_NN_TENSOR) {
            LOGE("ValidateInputAndOutput failed, tensor set as input should have type OH_NN_TENSOR, but received %d. "
                 "Tensor index: %u.", tensor->GetType(), i);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    for (uint32_t i = 0; i < outputIndices.size; i++) {
        tensor = m_allTensors[outputIndices.data[i]];
        if (tensor->GetType() != OH_NN_TENSOR) {
            LOGE("ValidateInputAndOutput failed, tensor set as output should have type OH_NN_TENSOR, but received %d. "
                 "Tensor index: %u.", tensor->GetType(), i);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    // The numbers of inputIndices and outputIndices are usually small, so the O(n^2) check is acceptable.
    for (uint32_t i = 0; i < inputIndices.size; i++) {
        for (uint32_t j = 0; j < outputIndices.size; j++) {
            if (inputIndices.data[i] == outputIndices.data[j]) {
                LOGE("ValidateInputAndOutput failed, a tensor should not be set as input and output at the same time, "
                     "input index %u, output index %u", inputIndices.data[i], outputIndices.data[j]);
                return OH_NN_INVALID_PARAMETER;
            }
        }
    }
    return OH_NN_SUCCESS;
}

/* Check whether the indices exceed the number of added tensors. */
OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const
{
    OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size);
    if (ret != OH_NN_SUCCESS) {
        LOGE("ValidateTensorArray failed, please check the validity of indices.");
        return ret;
    }

    size_t allTensorsSize = m_allTensors.size();
    for (uint32_t i = 0; i < indices.size; i++) {
        if (indices.data[i] >= allTensorsSize) {
            LOGE("ValidateTensorArray failed, index %{public}u is out of the number of added tensors.",
                 indices.data[i]);
            return OH_NN_INVALID_PARAMETER;
        }
    }

    return OH_NN_SUCCESS;
}

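// Checks the operation's input/output/parameter indices against the added tensors, then delegates
// construction of the operation to the OpsBuilder registered for the given operation type.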
OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices,
                                          const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
{
    if (IsBuild()) {
        LOGE("AddOperation failed, AddOperation is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, please check inputIndices and outputIndices.");
        return ret;
    }
    std::vector<uint32_t> inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
    std::vector<uint32_t> outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size);

    ret = ValidateTensorArray(paramIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, please check paramIndices.");
        return ret;
    }
    std::vector<uint32_t> parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size);

    const Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton();
    std::unique_ptr<Ops::OpsBuilder> opsBuilder = opsRegistry.GetOpsBuilder(opType);
    if (opsBuilder == nullptr) {
        LOGE("AddOperation failed, cannot add operation of type: %d.", opType);
        return OH_NN_INVALID_PARAMETER;
    }

    ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("AddOperation failed, error happened when building the operation.");
        return ret;
    }

    m_ops.emplace_back(std::move(opsBuilder));
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs(
    const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
{
    if (IsBuild()) {
        LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs is forbidden after model has been built.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_inputTensors.empty()) {
        LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
    if (ret != OH_NN_SUCCESS) {
        LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices.");
        return ret;
    }

    m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
    m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size);

    std::transform(m_inputIndices.begin(), m_inputIndices.end(), std::back_inserter(m_inputTensors),
        [this](uint32_t i) {
            return m_allTensors[i];
        });

    std::transform(m_outputIndices.begin(), m_outputIndices.end(), std::back_inserter(m_outputTensors),
        [this](uint32_t i) {
            return m_allTensors[i];
        });

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::CheckParameters() const
{
    if (m_liteGraph != nullptr) {
        LOGE("CheckParameters failed, liteGraph is not nullptr.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_metaGraph != nullptr) {
        LOGE("CheckParameters failed, metaGraph is not nullptr.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_allTensors.empty()) {
        LOGE("CheckParameters failed, m_allTensors is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!(m_inputTensors.empty() && (m_inputIndices.empty()))) {
        LOGE("CheckParameters failed, m_inputTensors or m_inputIndices is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!(m_outputTensors.empty() && (m_outputIndices.empty()))) {
        LOGE("CheckParameters failed, m_outputTensors or m_outputIndices is not empty.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode InnerModel::SetInputsAndOutputsInfo(const OH_NN_TensorInfo* inputsInfo, size_t inputSize,
    const OH_NN_TensorInfo* outputsInfo, size_t outputSize)
{
    OH_NN_ReturnCode ret = CheckParameters();
    if (ret != OH_NN_SUCCESS) {
        LOGE("SetInputsAndOutputsInfo failed, error happened when checking parameters.");
        return ret;
    }

    // Build the input NNTensors from inputsInfo.
    for (size_t i = 0; i < inputSize; ++i) {
        std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
        if (tensor == nullptr) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when creating input NNTensor.");
            return OH_NN_MEMORY_ERROR;
        }

        ret = tensor->BuildFromOHNNTensorInfo(inputsInfo[i]);
        if (ret != OH_NN_SUCCESS) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when building input NNTensor from info.");
            return ret;
        }
        m_inputIndices.emplace_back(i);
        m_allTensors.emplace_back(tensor);
        m_inputTensors.emplace_back(tensor);
    }

    // Build the output NNTensors from outputsInfo.
    for (size_t i = 0; i < outputSize; ++i) {
        std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
        if (tensor == nullptr) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when creating output NNTensor.");
            return OH_NN_MEMORY_ERROR;
        }

        ret = tensor->BuildFromOHNNTensorInfo(outputsInfo[i]);
        if (ret != OH_NN_SUCCESS) {
            LOGE("SetInputsAndOutputsInfo failed, error happened when building output NNTensor from info.");
            return ret;
        }
        m_outputIndices.emplace_back(i + inputSize);
        m_allTensors.emplace_back(tensor);
        m_outputTensors.emplace_back(tensor);
    }

    return OH_NN_SUCCESS;
}

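// Converts the tensors and operations collected through AddTensor/AddOperation into a LiteGraph:
// tensors are appended first (AddTensorsToLiteGraph), then nodes (AddNodesToLiteGraph), and finally
// a single sub-graph covering all nodes is registered on the graph.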
OH_NN_ReturnCode InnerModel::Build()
{
    NNRT_TRACE_NAME("Build model");
    if (IsBuild()) {
        LOGE("Build failed, OH_NNModel_Finish() shouldn't be called after OH_NNModel_Finish() or "
             "OH_NNModel_BuildFromMetaGraph() or OH_NNModel_BuildFromLiteGraph().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_allTensors.empty()) {
        LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (m_ops.empty()) {
        LOGE("Build failed, no operation has been added. Must call AddOperation before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if ((m_inputIndices.empty()) || (m_outputIndices.empty())) {
        LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph();
    if (pLiteGraph == nullptr) {
        LOGE("Build failed, error happened when creating LiteGraph.");
        return OH_NN_MEMORY_ERROR;
    }
    m_liteGraph.reset(pLiteGraph, LiteGraphDeleter());

    m_liteGraph->name_ = NNR_MODEL;

    std::unordered_map<uint32_t, uint32_t> modelIDToGraphID;
    AddTensorsToLiteGraph(modelIDToGraphID);

    OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // subGraph will be released by LiteGraph if it is added into instance of LiteGraph.
    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
    if (subGraph == nullptr) {
        LOGE("Build failed, error happened when creating subgraph.");
        return OH_NN_NULL_PTR;
    }

    subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph
    subGraph->input_indices_ = m_liteGraph->input_indices_;
    subGraph->output_indices_ = m_liteGraph->output_indices_;
    uint32_t nodeCount = static_cast<uint32_t>(m_ops.size()); // m_ops.size() is smaller than UINT32_MAX
    for (uint32_t i = 0; i < nodeCount; i++) {
        subGraph->node_indices_.emplace_back(i);
    }
    m_liteGraph->sub_graphs_.emplace_back(subGraph);

    return OH_NN_SUCCESS;
}

void InnerModel::AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    uint32_t graphID = 0;
    LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor);
    size_t tensorCount = m_allTensors.size();
    for (size_t i = 0; i < tensorCount; i++) {
        const std::shared_ptr<NNTensor>& nnTensor = m_allTensors[i];
        // If the tensor is used as an operation parameter, it is not converted to a LiteGraph tensor.
        if (nnTensor->IsOpParameter()) {
            continue;
        }

        tensor = nnTensor->ConvertToLiteGraphTensor();
        m_liteGraph->all_tensors_.emplace_back(tensor.release());
        modelIDToGraphID[i] = graphID++;
    }

    // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputsAndOutputs(),
    // there is no need to check twice.
    std::vector<uint32_t>& inputIndices = m_liteGraph->input_indices_;
    std::transform(m_inputIndices.begin(), m_inputIndices.end(), std::back_inserter(inputIndices),
        [modelIDToGraphID](uint32_t index) {return modelIDToGraphID.at(index);});

    std::vector<uint32_t>& outputIndices = m_liteGraph->output_indices_;
    std::transform(m_outputIndices.begin(), m_outputIndices.end(), std::back_inserter(outputIndices),
        [modelIDToGraphID](uint32_t index) {return modelIDToGraphID.at(index);});
}

OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    MSLITE::LiteGraph::Node* node{nullptr};
    size_t opCount = m_ops.size();
    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
    for (size_t i = 0; i < opCount; i++) {
        std::unique_ptr<Ops::OpsBuilder>& op = m_ops[i];
        // node will be released by LiteGraph if it is added into instance of LiteGraph.
        node = new(std::nothrow) MSLITE::LiteGraph::Node();
        if (node == nullptr) {
            LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph node.");
            return OH_NN_NULL_PTR;
        }

        node->name_ = op->GetName() + ":" + std::to_string(i);
        node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType());

        op->GetInputIndex(node->input_indices_, modelIDToGraphID);
        op->GetOutputIndex(node->output_indices_, modelIDToGraphID);

        primitive = op->GetPrimitive();
        if (primitive == nullptr) {
            LOGE("Build %s primitive failed.", op->GetName().c_str());
            delete node;
            return OH_NN_FAILED;
        }

        node->primitive_ = primitive.release();
        m_liteGraph->all_nodes_.emplace_back(node);
    }

    return OH_NN_SUCCESS;
}

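// Asks the backend identified by deviceID which operations of the built LiteGraph it supports.
// The result is cached in m_supportedOperations so that the pointer handed out via isSupported
// remains valid after this call returns.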
OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount)
{
    if (m_liteGraph == nullptr) {
        LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    BackendManager& backendManager = BackendManager::GetInstance();

    std::shared_ptr<Backend> backend = backendManager.GetBackend(deviceID);
    if (backend == nullptr) {
        LOGE("GetSupportedOperations failed, retrieve backend failed.");
        return OH_NN_FAILED;
    }

    std::vector<bool> supportedOperations;
    std::shared_ptr<NNBackend> nnBackend = std::reinterpret_pointer_cast<NNBackend>(backend);
    OH_NN_ReturnCode ret = nnBackend->GetSupportedOperation(m_liteGraph, supportedOperations);
    if (ret != OH_NN_SUCCESS) {
        LOGE("GetSupportedOperations failed, error happened when getting supported operations from the backend.");
        return ret;
    }

    m_supportedOperations.clear();
    std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations));

    *isSupported = reinterpret_cast<bool*>(m_supportedOperations.data());
    opCount = m_supportedOperations.size();

    return OH_NN_SUCCESS;
}

std::vector<std::shared_ptr<NNTensor>> InnerModel::GetInputTensors() const
{
    return m_inputTensors;
}

std::vector<std::shared_ptr<NNTensor>> InnerModel::GetOutputTensors() const
{
    return m_outputTensors;
}

std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> InnerModel::GetInputTensorDescs() const
{
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> inputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> tensorDescPair;
    if (m_inputTensors.size() > INPUT_OUTPUT_MAX_INDICES) {
        LOGE("The number of input tensor descs exceeds 200.");
        return inputTensorDescs;
    }

    for (auto inputTensor : m_inputTensors) {
        tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr<TensorDesc>();
        inputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get()));
        tensorDescPair.second = inputTensor->GetType();
        inputTensorDescs.emplace_back(tensorDescPair);
    }

    return inputTensorDescs;
}

std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> InnerModel::GetOutputTensorDescs() const
{
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> tensorDescPair;
    if (m_outputTensors.size() > INPUT_OUTPUT_MAX_INDICES) {
        LOGE("The number of output tensor descs exceeds 200.");
        return outputTensorDescs;
    }

    for (auto outputTensor : m_outputTensors) {
        tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr<TensorDesc>();
        outputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get()));
        tensorDescPair.second = outputTensor->GetType();
        outputTensorDescs.emplace_back(tensorDescPair);
    }

    return outputTensorDescs;
}

void* InnerModel::GetMetaGraph() const
{
    return m_metaGraph;
}

ExtensionConfig InnerModel::GetExtensionConfig() const
{
    return m_extensionConfig;
}
}  // namespace NeuralNetworkRuntime
}  // namespace OHOS