• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "execution_plan.h"

#include <vector>

#include "common/log.h"
#include "cpp_type.h"


namespace OHOS {
namespace NeuralNetworkRuntime {
GetInputDimRanges(std::vector<std::vector<uint32_t>> & minInputDims,std::vector<std::vector<uint32_t>> & maxInputDims)26 OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
27     std::vector<std::vector<uint32_t>>& maxInputDims)
28 {
29     OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(minInputDims, maxInputDims);
30     if (ret != OH_NN_SUCCESS) {
31         LOGE("ExecutionPlan GetInputDimRanges() failed.");
32         return ret;
33     }
34 
35     return OH_NN_SUCCESS;
36 }
37 
Run(const std::vector<std::shared_ptr<NNTensor>> & inputTensors,std::vector<std::shared_ptr<NNTensor>> & outputTensors)38 OH_NN_ReturnCode ExecutionPlan::Run(const std::vector<std::shared_ptr<NNTensor>>& inputTensors,
39                                     std::vector<std::shared_ptr<NNTensor>>& outputTensors)
40 {
41     OH_NN_ReturnCode ret {OH_NN_FAILED};
42     IOTensor tensor;
43     std::vector<IOTensor> inputIOTensors;
44     size_t inputSize = inputTensors.size();
45     size_t outputSize = outputTensors.size();
46     for (size_t i = 0; i < inputSize; ++i) {
47         inputTensors[i]->ConvertToIOTensor(tensor);
48         inputIOTensors.emplace_back(std::move(tensor));
49     }
50 
51     std::vector<IOTensor> outputIOTensors;
52     for (size_t i = 0; i < outputSize; ++i) {
53         outputTensors[i]->ConvertToIOTensor(tensor);
54         outputIOTensors.emplace_back(std::move(tensor));
55     }
56 
57     std::vector<std::vector<int32_t>> outputsDims;
58     std::vector<bool> isSufficientDataBuffer;
59     ret = m_preparedModel->Run(inputIOTensors, outputIOTensors, outputsDims, isSufficientDataBuffer);
60     if (ret != OH_NN_SUCCESS) {
61         LOGE("PrepardModel Run() failed.");
62         return ret;
63     }
64 
65     // Set the output NNTensor's dimensions from output IOTensor if it is dynamic.
66     // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions.
67     if (outputsDims.size() != outputSize) {
68         LOGE("ExecutionPlan run failed, size of outputsDims is not equal to outputTensors.");
69         return OH_NN_INVALID_PARAMETER;
70     }
71     for (size_t i = 0; i < outputSize; ++i) {
72         ret = outputTensors[i]->SetDimensions(outputsDims[i]);
73         if (ret != OH_NN_SUCCESS) {
74             LOGE("Run failed, error happened when setting output tensor's dimensions, output id: %zu.", i);
75             return ret;
76         }
77     }
78 
79     return OH_NN_SUCCESS;
80 }
81 
82 
// Returns the device associated with the plan's inputs. The whole plan runs
// on a single device (m_device), so this matches GetOutputDevice().
std::shared_ptr<Device> ExecutionPlan::GetInputDevice() const
{
    return m_device;
}
87 
88 
// Returns the device associated with the plan's outputs. The whole plan runs
// on a single device (m_device), so this matches GetInputDevice().
std::shared_ptr<Device> ExecutionPlan::GetOutputDevice() const
{
    return m_device;
}
} // NeuralNetworkRuntime
} // OHOS