/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdi_prepared_model_v2_0.h"

#include "common/log.h"
#include "hdi_returncode_utils.h"
#include "memory_manager.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace {
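// Maps an NNRt OH_NN_DataType value to the corresponding HDI V2_0 data type.
// Unrecognized values fall back to DATA_TYPE_UNKNOWN.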
V2_0::DataType TransDataType(const OH_NN_DataType& dataType)
{
    switch (dataType) {
        case OH_NN_BOOL:
            return V2_0::DataType::DATA_TYPE_BOOL;
        case OH_NN_INT8:
            return V2_0::DataType::DATA_TYPE_INT8;
        case OH_NN_INT16:
            return V2_0::DataType::DATA_TYPE_INT16;
        case OH_NN_INT32:
            return V2_0::DataType::DATA_TYPE_INT32;
        case OH_NN_INT64:
            return V2_0::DataType::DATA_TYPE_INT64;
        case OH_NN_UINT8:
            return V2_0::DataType::DATA_TYPE_UINT8;
        case OH_NN_UINT16:
            return V2_0::DataType::DATA_TYPE_UINT16;
        case OH_NN_UINT32:
            return V2_0::DataType::DATA_TYPE_UINT32;
        case OH_NN_UINT64:
            return V2_0::DataType::DATA_TYPE_UINT64;
        case OH_NN_FLOAT16:
            return V2_0::DataType::DATA_TYPE_FLOAT16;
        case OH_NN_FLOAT32:
            return V2_0::DataType::DATA_TYPE_FLOAT32;
        case OH_NN_FLOAT64:
            return V2_0::DataType::DATA_TYPE_FLOAT64;
        default:
            return V2_0::DataType::DATA_TYPE_UNKNOWN;
    }
}

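// Maps an NNRt OH_NN_Format value to the corresponding HDI V2_0 tensor format.
// Unrecognized values fall back to FORMAT_NONE.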
V2_0::Format TransFormat(const OH_NN_Format& format)
{
    switch (format) {
        case OH_NN_FORMAT_NCHW:
            return V2_0::Format::FORMAT_NCHW;
        case OH_NN_FORMAT_NHWC:
            return V2_0::Format::FORMAT_NHWC;
        default:
            return V2_0::Format::FORMAT_NONE;
    }
}

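// Converts a runtime IOTensor into an HDI V2_0::IOTensor. The tensor's data
// pointer is resolved to a shared-memory file descriptor through MemoryManager;
// if the buffer cannot be resolved, the fd stays INVALID_FD so callers can
// detect the failure.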
V2_0::IOTensor TransIOTensor(const IOTensor& tensor)
{
    V2_0::IOTensor iTensor;
    iTensor.name = tensor.name;
    iTensor.dataType = TransDataType(tensor.dataType);
    iTensor.dimensions = tensor.dimensions;
    iTensor.format = TransFormat(tensor.format);

    V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0};
    if (tensor.data != nullptr) {
        auto memManager = MemoryManager::GetInstance();
        Memory memory;
        auto ret = memManager->GetMemory(tensor.data, memory);
        if (ret != OH_NN_SUCCESS) {
            LOGE("Invalid Tensor buffer, cannot transform to fd.");
        } else {
            iBuffer.fd = memory.fd;
            iBuffer.bufferSize = memory.length;
            iBuffer.offset = 0;
            iBuffer.dataSize = memory.length;
        }
    }
    iTensor.data = iBuffer;

    return iTensor;
}
} // unnamed namespace

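// Wraps an HDI V2_0 prepared-model proxy and caches the HDI version it reports.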
HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr<V2_0::IPreparedModel> hdiPreparedModel)
    : m_hdiPreparedModel(hdiPreparedModel)
{
    hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
}

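// Exports the compiled model cache from the driver as shared buffers and maps
// each buffer into the caller's address space. modelCache must be empty on entry.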
OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<Buffer>& modelCache)
{
    if (!modelCache.empty()) {
        LOGE("The vector of modelCache should be empty. size=%{public}zu", modelCache.size());
        return OH_NN_INVALID_PARAMETER;
    }

    std::vector<V2_0::SharedBuffer> iBuffers;
    auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers);
    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Export model cache failed");
    }

    auto memManager = MemoryManager::GetInstance();
    size_t iBuffersSize = iBuffers.size();
    for (size_t i = 0; i < iBuffersSize; i++) {
        auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
        if (addr == nullptr) {
            LOGE("Export the %{public}zuth model cache failed, cannot map fd to address.", i + 1);
            return OH_NN_MEMORY_ERROR;
        }
        Buffer modelBuffer {addr, iBuffers[i].bufferSize};
        modelCache.emplace_back(modelBuffer);
    }

    return OH_NN_SUCCESS;
}

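// Executes inference on the prepared model. Inputs and outputs are translated into
// HDI IOTensors backed by shared-memory fds; the driver writes the actual output
// dimensions into outputsDims. isOutputBufferEnough is not populated by this
// V2_0 implementation.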
OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
{
    V2_0::IOTensor iTensor;
    std::vector<V2_0::IOTensor> iInputTensors;
    for (auto& input: inputs) {
        iTensor = TransIOTensor(input);
        if (iTensor.data.fd == INVALID_FD) {
            LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
            return OH_NN_INVALID_PARAMETER;
        }
        iInputTensors.emplace_back(iTensor);
    }

    std::vector<V2_0::IOTensor> iOutputTensors;
    for (auto& output: outputs) {
        iTensor = TransIOTensor(output);
        if (iTensor.data.fd == INVALID_FD) {
            LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
            return OH_NN_INVALID_PARAMETER;
        }
        iOutputTensors.emplace_back(iTensor);
    }

    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims);
    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed");
    }
    if (outputsDims.empty()) {
        LOGE("Run failed, outputsDims is empty.");
        return OH_NN_UNAVALIDABLE_DEVICE;
    }

    return OH_NN_SUCCESS;
}

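// Queries the driver for the minimum and maximum supported dimensions of each
// model input.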
OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
                                                         std::vector<std::vector<uint32_t>>& maxInputDims)
{
    auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims);
    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get input dim ranges failed");
    }

    return OH_NN_SUCCESS;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS