/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdi_device.h"

#include "hdf_base.h"
#include "mindir.h"

#include "hdi_prepared_model.h"
#include "memory_manager.h"
#include "transform.h"
#include "common/log.h"
#include "common/utils.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
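// HDIDevice wraps the V1_0 NNRt HDI driver proxy: each method forwards to
// the proxy and translates HDF error codes into OH_NN_ReturnCode values.
// The driver's HDI version is queried once at construction and cached.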
HDIDevice::HDIDevice(OHOS::sptr<V1_0::INnrtDevice> device) : m_iDevice(device)
{
    device->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
}

OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name)
{
    auto ret = m_iDevice->GetDeviceName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name)
{
    auto ret = m_iDevice->GetVendorName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType)
{
    V1_0::DeviceType iDeviceType;
    auto ret = m_iDevice->GetDeviceType(iDeviceType);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device type failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }

    deviceType = HDIToNN::TransHDIDeviceType(iDeviceType);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status)
{
    V1_0::DeviceStatus iDeviceStatus;
    auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device status failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus);
    return OH_NN_SUCCESS;
}

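// Queries, for each node of the LiteGraph, whether the driver supports the
// operation. The graph's constant tensors are staged in a driver-allocated
// shared buffer so the graph can be serialized into an HDI model; both the
// HDI model and the staging buffer are released before returning, whether
// or not the query succeeded.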
OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
                                                  std::vector<bool>& ops)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot query supported operation.");
        return OH_NN_NULL_PTR;
    }

    V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops);

    mindspore::lite::MindIR_Model_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Get supported operation failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPerformanceModeSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query performance mode supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPrioritySupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query priority supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsDynamicInputSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query dynamic input supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsModelCacheSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query cache model supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

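// Compiles a LiteGraph into a device-side prepared model. As in
// GetSupportedOperation, the constant tensors are staged in a shared
// buffer and the graph is converted to an HDI model; both are released
// once the driver call returns, and only then is the result checked.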
OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
                                         const ModelConfig& config,
                                         std::shared_ptr<PreparedModel>& preparedModel)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot prepare model.");
        return OH_NN_INVALID_PARAMETER;
    }

    V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    V1_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode);
    iModelConfig.priority = NNToHDI::TransPriority(config.priority);
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;

    auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel);

    mindspore::lite::MindIR_Model_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) {
        LOGE("Prepare model failed. ErrorCode=%d", preparedRet);
        return OH_NN_FAILED;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModel>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model failed, because the preparedModel instance could not be created.");
        return OH_NN_MEMORY_ERROR;
    }

    return OH_NN_SUCCESS;
}

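// Rebuilds a prepared model from previously exported cache buffers. Each
// cache entry must be memory that MemoryManager knows about, so that its
// fd and length can be wrapped into a V1_0::SharedBuffer for the driver.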
OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector<ModelBuffer>& modelCache,
                                                       const ModelConfig& config,
                                                       std::shared_ptr<PreparedModel>& preparedModel)
{
    std::vector<V1_0::SharedBuffer> iBuffers;
    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    OH_NN_ReturnCode ret;
    size_t modelCacheSize = modelCache.size();
    for (size_t i = 0; i < modelCacheSize; i++) {
        ret = memManager->GetMemory(modelCache[i].buffer, memory);
        if (ret != OH_NN_SUCCESS) {
            LOGE("Model cache %zu is invalid. Please pass a valid model cache.", i + 1);
            return ret;
        }
        iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length});
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode);
    iModelConfig.priority = NNToHDI::TransPriority(config.priority);

    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel);
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModel>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model from model cache failed, because the preparedModel instance could not be created.");
        return OH_NN_MEMORY_ERROR;
    }
    return OH_NN_SUCCESS;
}

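// Allocates driver shared memory of the given length and maps it into the
// caller's address space through MemoryManager, returning the mapped
// address, or nullptr on failure.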
void* HDIDevice::AllocateBuffer(size_t length)
{
    if (length == 0) {
        LOGE("The length param is invalid, length=0");
        return nullptr;
    }

    V1_0::SharedBuffer buffer;
    auto ret = m_iDevice->AllocateBuffer(length, buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Allocate buffer error. ErrorCode: %d", ret);
        return nullptr;
    }

    auto memManager = MemoryManager::GetInstance();
    auto addr = memManager->MapMemory(buffer.fd, length);
    if (addr == nullptr) {
        LOGE("Map fd to address failed.");
    }
    return addr;
}

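// Releases a buffer previously returned by AllocateBuffer: the driver-side
// shared memory is released first, then the local mapping is removed.
// A minimal usage sketch (illustrative only; the INnrtDevice proxy and the
// buffer size are assumptions):
//
//     HDIDevice device(iDevice);                  // iDevice: V1_0::INnrtDevice proxy
//     void* buffer = device.AllocateBuffer(1024); // driver memory, mapped locally
//     if (buffer != nullptr) {
//         // ... fill the buffer with input data ...
//         device.ReleaseBuffer(buffer);           // release driver memory and unmap
//     }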
OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer)
{
    if (buffer == nullptr) {
        LOGE("Buffer is nullptr, no need to release.");
        return OH_NN_INVALID_PARAMETER;
    }

    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    auto ret = memManager->GetMemory(buffer, memory);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Invalid buffer: it is not an NNRt buffer.");
        return ret;
    }

    V1_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length};
    auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer);
    if (deviceResult != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode: %d", deviceResult);
        return OH_NN_FAILED;
    }

    ret = memManager->UnMapMemory(buffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Unmap memory failed.");
        return ret;
    }

    return OH_NN_SUCCESS;
}

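// Internal helper for shared buffers that were allocated on the driver but
// never mapped locally (e.g. the constant-tensor staging buffers above);
// a buffer with INVALID_FD is treated as already released.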
OH_NN_ReturnCode HDIDevice::ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer)
{
    if (buffer.fd == INVALID_FD) {
        LOGI("No need to release. fd=%d", INVALID_FD);
        return OH_NN_SUCCESS;
    }

    auto ret = m_iDevice->ReleaseBuffer(buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode=%d", ret);
        return OH_NN_FAILED;
    }
    return OH_NN_SUCCESS;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS