• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
#include "inner_model.h"

#include <algorithm>
#include <memory>
#include <new>
#include <string>
#include <unordered_map>
#include <vector>

#include "securec.h"

#include "common/scoped_trace.h"
#include "common/utils.h"
#include "device_manager.h"
#include "hdi_device.h"
#include "ops_builder.h"
#include "ops_registry.h"
#include "transform.h"
#include "validation.h"
32 
33 namespace MSLITE = mindspore::lite;
34 
35 namespace OHOS {
36 namespace NeuralNetworkRuntime {
37 const std::string NNR_MODEL = "NNR_Model";
38 const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model";
39 
40 namespace {
// Custom deleter for MSLITE::LiteGraph. Used with std::shared_ptr so that the
// graph is destroyed through the MindIR C API rather than operator delete.
class LiteGraphDeleter {
public:
    void operator()(MSLITE::LiteGraph* liteGraph) const
    {
        MindIR_LiteGraph_Destroy(&liteGraph);
    }
};
48 
ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)49 std::shared_ptr<NNTensor> ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)
50 {
51     MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor);
52     OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType);
53     std::vector<int32_t> msDims = MSLITE::MindIR_Tensor_GetDims(msTensor);
54     std::vector<MSLITE::QuantParam> msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor);
55     std::vector<QuantParam> nnQuantParams = MSToNN::TransformQuantParams(msQuantParams);
56 
57     std::shared_ptr<NNTensor> nnTensor = CreateSharedPtr<NNTensor>();
58     if (nnTensor == nullptr) {
59         LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor.");
60         return nullptr;
61     }
62 
63     OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR);
64     if (ret != OH_NN_SUCCESS) {
65         LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes.");
66         return nullptr;
67     }
68 
69     return nnTensor;
70 }
71 
ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph * liteGraph,const std::vector<uint32_t> & indices,std::vector<std::shared_ptr<NNTensor>> & nnTensors)72 OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
73                                                  const std::vector<uint32_t>& indices,
74                                                  std::vector<std::shared_ptr<NNTensor>>& nnTensors)
75 {
76     if (indices.empty()) {
77         LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list.");
78         return OH_NN_INVALID_PARAMETER;
79     }
80 
81     uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end()));
82     if (maximumIndex >= liteGraph->all_tensors_.size()) {
83         LOGE("ConstructNNTensorsFromLiteGraph failed, index exceed size of all_tensors inside liteGraph.");
84         return OH_NN_INVALID_PARAMETER;
85     }
86 
87     std::shared_ptr<NNTensor> nnTensor;
88     for (uint32_t i : indices) {
89         nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]);
90         if (nnTensor == nullptr) {
91             LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor.");
92             return OH_NN_NULL_PTR;
93         }
94 
95         nnTensors.emplace_back(nnTensor);
96     }
97 
98     return OH_NN_SUCCESS;
99 }
100 } // anonymous namespace
101 
InnerModel()102 InnerModel::InnerModel() {}
103 
IsBuild() const104 bool InnerModel::IsBuild() const
105 {
106     return (m_liteGraph != nullptr);
107 }
108 
BuildFromLiteGraph(const MSLITE::LiteGraph * liteGraph)109 OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph)
110 {
111     NNRT_TRACE_NAME("Build model from lite graph");
112     if (liteGraph == nullptr) {
113         LOGE("BuildFromLiteGraph failed, passed empty liteGraph.");
114         return OH_NN_INVALID_PARAMETER;
115     }
116 
117     if (m_liteGraph != nullptr) {
118         LOGE("BuildFromLiteGraph failed, liteGraph has been built or loaded before.");
119         return OH_NN_OPERATION_FORBIDDEN;
120     }
121 
122     if (!m_allTensors.empty() || !m_ops.empty()) {
123         LOGE("BuildFromLiteGraph failed, please LoadLiteGraph without adding tensor and operations.");
124         return OH_NN_OPERATION_FORBIDDEN;
125     }
126 
127     m_inputTensors.clear();
128     OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors);
129     if (ret != OH_NN_SUCCESS) {
130         LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph.");
131         return ret;
132     }
133 
134     m_outputTensors.clear();
135     ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
136     if (ret != OH_NN_SUCCESS) {
137         LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
138         return ret;
139     }
140 
141     m_liteGraph.reset(const_cast<MSLITE::LiteGraph*>(liteGraph), LiteGraphDeleter());
142     m_liteGraph->name_ = LOADED_NNR_MODEL;
143 
144     return OH_NN_SUCCESS;
145 }
146 
AddTensor(const OH_NN_Tensor & nnTensor)147 OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
148 {
149     if (m_liteGraph != nullptr) {
150         LOGE("AddTensor failed, AddTensor is forbidden after Finish() or LoadLiteGraph() has been called.");
151         return OH_NN_OPERATION_FORBIDDEN;
152     }
153 
154     std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
155     if (tensor == nullptr) {
156         LOGE("AddTensor failed, error happened when creating NNTensor.");
157         return OH_NN_MEMORY_ERROR;
158     }
159 
160     OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
161     if (ret != OH_NN_SUCCESS) {
162         LOGE("AddTensor failed, error happened when build NNTensor from OH_NN_Tensor.");
163         return ret;
164     }
165 
166     // The NNTensor is named as "Tensor: <tensor index>"".
167     tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
168     m_allTensors.emplace_back(tensor);
169 
170     return OH_NN_SUCCESS;
171 }
172 
// TODO: reduce the cyclomatic complexity of SetTensorValue.
SetTensorValue(uint32_t index,const void * buffer,size_t length)174 OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
175 {
176     if (m_liteGraph != nullptr) {
177         LOGE("SetTensorValue failed, SetTensorValue is forbidden after Finish() or LoadLiteGraph() has been called.");
178         return OH_NN_OPERATION_FORBIDDEN;
179     }
180 
181     if (index >= m_allTensors.size()) {
182         LOGE("SetTensorValue failed, passed index %u out of the number of added tensors.", index);
183         return OH_NN_INVALID_PARAMETER;
184     }
185 
186     const std::shared_ptr<NNTensor> tensor = m_allTensors[index];
187     if (tensor->GetBuffer() != nullptr) {
188         LOGE("SetTensorValue failed, tensor has been set value twice. Tensor index: %u.", index);
189         return OH_NN_INVALID_PARAMETER;
190     }
191 
192     if (buffer == nullptr) {
193         LOGW("SetTensorValue passed empty buffer, which makes no effect.");
194         return OH_NN_SUCCESS;
195     }
196 
197     if (tensor->IsDynamicShape()) {
198         LOGE("SetTensorValue failed, cannot set value to tensor with dynamic shape.");
199         return OH_NN_OPERATION_FORBIDDEN;
200     }
201 
202     if (length != tensor->GetDataLength()) {
203         LOGE("SetTensorValue failed, get buffer length %zu different from the byte size of tensor %zu.",
204              length, tensor->GetDataLength());
205         return OH_NN_INVALID_PARAMETER;
206     }
207 
208     // Data will be released inside NNTensor if it is set inside NNTensor using SetBuffer().
209     void* data = new (std::nothrow) char[length];
210     if (data == nullptr) {
211         LOGE("SetTensorValue failed, please check whether it runs out of memory.");
212         return OH_NN_MEMORY_ERROR;
213     }
214 
215     errno_t ret = memcpy_s(data, length, buffer, length);
216     if (ret != EOK) {
217         LOGE("SetTensorValue failed, please the information of error number %d from memcpy_s.", ret);
218         delete [] reinterpret_cast<char*>(data);
219         return OH_NN_FAILED;
220     }
221 
222     tensor->SetBuffer(data, length);
223     return OH_NN_SUCCESS;
224 }
225 
ValidateInputAndOutput(const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices) const226 OH_NN_ReturnCode InnerModel::ValidateInputAndOutput(
227     const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const
228 {
229     OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices);
230     if (ret != OH_NN_SUCCESS) {
231         LOGE("ValidateInputAndOutput failed, please check input indices.");
232         return ret;
233     }
234 
235     ret = ValidateTensorArray(outputIndices);
236     if (ret != OH_NN_SUCCESS) {
237         LOGE("ValidateInputAndOutput failed, please check output indices.");
238         return ret;
239     }
240 
241     if (inputIndices.size == 0) {
242         LOGE("ValidateInputAndOutput failed, passed empty input indices.");
243         return OH_NN_INVALID_PARAMETER;
244     }
245 
246     if (outputIndices.size == 0) {
247         LOGE("ValidateInputAndOutput failed, passed empty output indices.");
248         return OH_NN_INVALID_PARAMETER;
249     }
250 
251     std::shared_ptr<NNTensor> tensor{nullptr};
252     for (uint32_t i = 0; i < inputIndices.size; i++) {
253         tensor = m_allTensors[inputIndices.data[i]];
254         if (tensor->GetType() != OH_NN_TENSOR) {
255             LOGE("ValidateInputAndOutput failed, tensor set as input should has type of OH_NN_TENSOR, but receive %d."
256                  "Tensor index: %u.", tensor->GetType(), i);
257             return OH_NN_INVALID_PARAMETER;
258         }
259     }
260 
261     for (uint32_t i = 0; i < outputIndices.size; i++) {
262         tensor = m_allTensors[outputIndices.data[i]];
263         if (tensor->GetType() != OH_NN_TENSOR) {
264             LOGE("ValidateInputAndOutput failed, tensor set as output should has type of OH_NN_TENSOR, but receive %d."
265                  "Tensor index: %u.", tensor->GetType(), i);
266             return OH_NN_INVALID_PARAMETER;
267         }
268     }
269 
270     // The number of inputIndices and outputIndices are usually small, so O(n**2) iteration is fine.
271     for (uint32_t i = 0; i < inputIndices.size; i++) {
272         for (uint32_t j = 0; j < outputIndices.size; j++) {
273             if (inputIndices.data[i] == outputIndices.data[j]) {
274                 LOGE("ValidateInputAndOutput failed, should not set an tensor as input and output at the same time, "
275                      "input index %u, output index %u", inputIndices.data[i], outputIndices.data[j]);
276                 return OH_NN_INVALID_PARAMETER;
277             }
278         }
279     }
280     return OH_NN_SUCCESS;
281 }
282 
283 /* Check whether the indices exceed the number of added tensors. */
ValidateTensorArray(const OH_NN_UInt32Array & indices) const284 OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const
285 {
286     OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size);
287     if (ret != OH_NN_SUCCESS) {
288         LOGE("ValidateTensorArray failed, please check the validity of indices.");
289         return ret;
290     }
291 
292     for (uint32_t i = 0; i < indices.size; i++) {
293         if (indices.data[i] >= m_allTensors.size()) {
294             LOGE("ValidateTensors failed, index %u is out of the number of added tensors.", indices.data[i]);
295             return OH_NN_INVALID_PARAMETER;
296         }
297     }
298 
299     return OH_NN_SUCCESS;
300 }
301 
AddOperation(OH_NN_OperationType opType,const OH_NN_UInt32Array & paramIndices,const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices)302 OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices,
303                                           const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
304 {
305     if (m_liteGraph != nullptr) {
306         LOGE("AddOperation failed, AddOperation is forbidden after after Finish() or LoadLiteGraph() has been called.");
307         return OH_NN_OPERATION_FORBIDDEN;
308     }
309 
310     OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
311     if (ret != OH_NN_SUCCESS) {
312         LOGE("AddOperation failed, please check inputIndices and outputIndices.");
313         return ret;
314     }
315     std::vector<uint32_t> inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
316     std::vector<uint32_t> outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size);
317 
318     ret = ValidateTensorArray(paramIndices);
319     if (ret != OH_NN_SUCCESS) {
320         LOGE("AddOperation failed, please check paramIndices.");
321         return ret;
322     }
323     std::vector<uint32_t> parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size);
324 
325     Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton();
326     std::unique_ptr<Ops::OpsBuilder> opsBuilder = opsRegistry.GetOpsBuilder(opType);
327     if (opsBuilder == nullptr) {
328         LOGE("AddOperation failed, cannot add operation of type: %d.", opType);
329         return OH_NN_INVALID_PARAMETER;
330     }
331 
332     ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors);
333     if (ret != OH_NN_SUCCESS) {
334         LOGE("AddOperation failed, error happens when build operations.");
335         return ret;
336     }
337 
338     m_ops.emplace_back(std::move(opsBuilder));
339     return OH_NN_SUCCESS;
340 }
341 
SpecifyInputsAndOutputs(const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices)342 OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs(
343     const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
344 {
345     if (m_liteGraph != nullptr) {
346         LOGE("SpecifyInputsAndOutputs failed, "
347              "SpecifyInputsAndOutputs is forbidden after Finish() or LoadLiteGraph() has been called.");
348         return OH_NN_OPERATION_FORBIDDEN;
349     }
350 
351     if (!m_inputTensors.empty()) {
352         LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice.");
353         return OH_NN_OPERATION_FORBIDDEN;
354     }
355 
356     OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
357     if (ret != OH_NN_SUCCESS) {
358         LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices.");
359         return ret;
360     }
361 
362     m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
363     m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size);
364 
365     for (uint32_t i : m_inputIndices) {
366         m_inputTensors.emplace_back(m_allTensors[i]);
367     }
368 
369     for (uint32_t i : m_outputIndices) {
370         m_outputTensors.emplace_back(m_allTensors[i]);
371     }
372 
373     return OH_NN_SUCCESS;
374 }
375 
Build()376 OH_NN_ReturnCode InnerModel::Build()
377 {
378     NNRT_TRACE_NAME("Build model");
379     if (m_liteGraph != nullptr) {
380         LOGE("Build failed,"
381              " OH_NNModel is not allowed to build again after Build() or BuildFromLiteGraph() has been called.");
382         return OH_NN_OPERATION_FORBIDDEN;
383     }
384 
385     if (m_allTensors.empty()) {
386         LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build().");
387         return OH_NN_OPERATION_FORBIDDEN;
388     }
389 
390     if (m_ops.empty()) {
391         LOGE("Build failed, no operation has beed added. Must call AddOperation before Build().");
392         return OH_NN_OPERATION_FORBIDDEN;
393     }
394 
395     if ((m_inputIndices.empty()) || (m_outputIndices.empty())) {
396         LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build().");
397         return OH_NN_OPERATION_FORBIDDEN;
398     }
399 
400     MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph();
401     if (pLiteGraph == nullptr) {
402         LOGE("Build failed, error happend when creating LiteGraph.");
403         return OH_NN_MEMORY_ERROR;
404     }
405     m_liteGraph.reset(pLiteGraph, LiteGraphDeleter());
406 
407     m_liteGraph->name_ = NNR_MODEL;
408 
409     std::unordered_map<uint32_t, uint32_t> modelIDToGraphID;
410     AddTensorsToLiteGraph(modelIDToGraphID);
411 
412     OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID);
413     if (ret != OH_NN_SUCCESS) {
414         return ret;
415     }
416 
417     // subGraph will be released by LiteGraph if it is added into instance of LiteGraph.
418     MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
419     if (subGraph == nullptr) {
420         LOGE("AddNodesToLiteGraph failed, error happened when creating subgraph.");
421         return OH_NN_NULL_PTR;
422     }
423 
424     subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph
425     subGraph->input_indices_ = m_liteGraph->input_indices_;
426     subGraph->output_indices_ = m_liteGraph->output_indices_;
427     uint32_t nodeCount = static_cast<uint32_t>(m_ops.size()); // m_ops.size() smaller than UINT32_MAX
428     for (uint32_t i = 0; i < nodeCount; i++) {
429         subGraph->node_indices_.emplace_back(i);
430     }
431     m_liteGraph->sub_graphs_.emplace_back(subGraph);
432 
433     return OH_NN_SUCCESS;
434 }
435 
AddTensorsToLiteGraph(std::unordered_map<uint32_t,uint32_t> & modelIDToGraphID)436 void InnerModel::AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
437 {
438     uint32_t graphID = 0;
439     LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor);
440     size_t tensorCount = m_allTensors.size();
441     for (size_t i = 0; i < tensorCount; i++) {
442         const std::shared_ptr<NNTensor>& nnTensor = m_allTensors[i];
443         // If the tensor is used as operation parameter, it will not convert to the tensor of LiteGraph.
444         if (nnTensor->IsOpParameter()) {
445             continue;
446         }
447 
448         tensor = nnTensor->ConvertToLiteGraphTensor();
449         m_liteGraph->all_tensors_.emplace_back(tensor.release());
450         modelIDToGraphID[i] = graphID++;
451     }
452 
453     // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputAndOutput(), there is no
454     // need to check twice.
455     std::vector<uint32_t>& inputIndices = m_liteGraph->input_indices_;
456     for (uint32_t index : m_inputIndices) {
457         inputIndices.emplace_back(modelIDToGraphID.at(index));
458     }
459 
460     std::vector<uint32_t>& outputIndices = m_liteGraph->output_indices_;
461     for (uint32_t index : m_outputIndices) {
462         outputIndices.emplace_back(modelIDToGraphID.at(index));
463     }
464 }
465 
/* Converts every builder in m_ops into a LiteGraph node appended to
 * m_liteGraph->all_nodes_. modelIDToGraphID (built by AddTensorsToLiteGraph())
 * remaps model tensor indices to LiteGraph tensor indices.
 * Returns OH_NN_NULL_PTR if a node cannot be allocated, OH_NN_FAILED if an
 * op cannot produce its primitive, otherwise OH_NN_SUCCESS.
 */
OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    MSLITE::LiteGraph::Node* node{nullptr};
    size_t opCount = m_ops.size();
    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
    for (size_t i = 0; i < opCount; i++) {
        std::unique_ptr<Ops::OpsBuilder>& op = m_ops[i];
        // node will be released by LiteGraph if it is added into instance of LiteGraph.
        node = new(std::nothrow) MSLITE::LiteGraph::Node();
        if (node == nullptr) {
            LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph tensor.");
            return OH_NN_NULL_PTR;
        }

        // The node name embeds the op's position so repeated op types stay distinguishable.
        node->name_ = op->GetName() + ":" + std::to_string(i);
        node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType());

        // Translate the op's tensor indices from model IDs to graph IDs.
        op->GetInputIndex(node->input_indices_, modelIDToGraphID);
        op->GetOutputIndex(node->output_indices_, modelIDToGraphID);

        primitive = op->GetPrimitive();
        if (primitive == nullptr) {
            LOGE("Build %s primitive failed.", op->GetName().c_str());
            delete node;    // node is not yet owned by the LiteGraph, so free it here
            return OH_NN_FAILED;
        }

        // Ownership transfers: the primitive into the node, the node into the LiteGraph.
        node->primitive_ = primitive.release();
        m_liteGraph->all_nodes_.emplace_back(node);
    }

    return OH_NN_SUCCESS;
}
499 
/* Queries device `deviceID` for per-operation support of the built graph.
 *
 * Must be called after the graph exists (Finish()/BuildFromLiteGraph()).
 * On success, *isSupported points into storage owned by this InnerModel
 * (m_supportedOperations) and opCount receives the number of flags; the
 * pointer is invalidated by the next call to this method or by destroying
 * the model.
 */
OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount)
{
    if (m_liteGraph == nullptr) {
        LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish().");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    DeviceManager& deviceManager = DeviceManager::GetInstance();

    std::shared_ptr<Device> device = deviceManager.GetDevice(deviceID);
    if (device == nullptr) {
        LOGE("GetSupportedOperations failed, retrieve device failed.");
        return OH_NN_FAILED;
    }

    std::vector<bool> supportedOperations;
    OH_NN_ReturnCode ret = device->GetSupportedOperation(m_liteGraph, supportedOperations);
    if (ret != OH_NN_SUCCESS) {
        LOGE("GetSupportedOperations failed, error happened when get supported operations from devices.");
        return ret;
    }

    // std::vector<bool> is bit-packed and has no data(); the flags are copied
    // into m_supportedOperations — presumably a byte-sized element type, given
    // the reinterpret_cast below (confirm against inner_model.h) — so a stable
    // bool* can be handed out.
    m_supportedOperations.clear();
    std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations));

    *isSupported = reinterpret_cast<bool*>(m_supportedOperations.data());
    opCount = m_supportedOperations.size();

    return OH_NN_SUCCESS;
}
530 
// Returns the underlying LiteGraph; nullptr until Build() or
// BuildFromLiteGraph() has succeeded. Ownership is shared with this model.
std::shared_ptr<MSLITE::LiteGraph> InnerModel::GetLiteGraphs() const
{
    return m_liteGraph;
}
535 
// Returns a copy of the input tensor list recorded by
// SpecifyInputsAndOutputs() or BuildFromLiteGraph().
std::vector<std::shared_ptr<NNTensor>> InnerModel::GetInputTensors() const
{
    return m_inputTensors;
}
540 
// Returns a copy of the output tensor list recorded by
// SpecifyInputsAndOutputs() or BuildFromLiteGraph().
std::vector<std::shared_ptr<NNTensor>> InnerModel::GetOutputTensors() const
{
    return m_outputTensors;
}
545 }  // namespace NeuralNetworkRuntime
546 }  // namespace OHOS