/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

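// HDIDeviceV1_0 adapts the NNRt Device abstraction to the HDI INnrtDevice
// V1.0 driver interface: each method forwards to the driver proxy and
// translates HDF error codes into OH_NN_ReturnCode values.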
#include "hdi_device_v1_0.h"

#include "hdf_base.h"
#include "mindir.h"

#include "hdi_prepared_model_v1_0.h"
#include "memory_manager.h"
#include "transform.h"
#include "common/log.h"
#include "common/utils.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace {
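// Helpers translating between the NNRt framework enums and their HDI V1.0
// counterparts; unrecognized values map to the NONE/OTHERS/UNKNOWN member.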
OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType)
{
    switch (iDeviceType) {
        case V1_0::DeviceType::CPU:
            return OH_NN_CPU;
        case V1_0::DeviceType::GPU:
            return OH_NN_GPU;
        case V1_0::DeviceType::ACCELERATOR:
            return OH_NN_ACCELERATOR;
        default:
            return OH_NN_OTHERS;
    }
}

DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus)
{
    switch (iDeviceStatus) {
        case V1_0::DeviceStatus::AVAILABLE:
            return DeviceStatus::AVAILABLE;
        case V1_0::DeviceStatus::BUSY:
            return DeviceStatus::BUSY;
        case V1_0::DeviceStatus::OFFLINE:
            return DeviceStatus::OFFLINE;
        default:
            return DeviceStatus::UNKNOWN;
    }
}

V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode)
{
    switch (mode) {
        case OH_NN_PERFORMANCE_LOW:
            return V1_0::PerformanceMode::PERFORMANCE_LOW;
        case OH_NN_PERFORMANCE_MEDIUM:
            return V1_0::PerformanceMode::PERFORMANCE_MEDIUM;
        case OH_NN_PERFORMANCE_HIGH:
            return V1_0::PerformanceMode::PERFORMANCE_HIGH;
        case OH_NN_PERFORMANCE_EXTREME:
            return V1_0::PerformanceMode::PERFORMANCE_EXTREME;
        default:
            return V1_0::PerformanceMode::PERFORMANCE_NONE;
    }
}

V1_0::Priority TransPriority(const OH_NN_Priority& priority)
{
    switch (priority) {
        case OH_NN_PRIORITY_LOW:
            return V1_0::Priority::PRIORITY_LOW;
        case OH_NN_PRIORITY_MEDIUM:
            return V1_0::Priority::PRIORITY_MEDIUM;
        case OH_NN_PRIORITY_HIGH:
            return V1_0::Priority::PRIORITY_HIGH;
        default:
            return V1_0::Priority::PRIORITY_NONE;
    }
}
} // anonymous namespace

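// The adapter only stores the HDI device proxy; no other state is set up here.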
HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr<V1_0::INnrtDevice> device) : m_iDevice(device)
{}

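// Simple query wrappers: forward the call to the driver and map any HDF
// failure to OH_NN_UNAVALIDABLE_DEVICE.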
OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name)
{
    auto ret = m_iDevice->GetDeviceName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name)
{
    auto ret = m_iDevice->GetVendorName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version)
{
    auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI version failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType)
{
    V1_0::DeviceType iDeviceType;
    auto ret = m_iDevice->GetDeviceType(iDeviceType);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device type failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }

    deviceType = TransHDIDeviceV1_0Type(iDeviceType);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status)
{
    V1_0::DeviceStatus iDeviceStatus;
    auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device status failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    status = TransHDIDeviceV1_0Status(iDeviceStatus);
    return OH_NN_SUCCESS;
}

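// Queries which nodes of the LiteGraph the driver can run. The graph's const
// tensors are staged in a driver-allocated shared buffer, the graph is
// converted to an HDI model for the query, and both are released before
// returning.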
OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
    std::vector<bool>& ops)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot query supported operation.");
        return OH_NN_NULL_PTR;
    }

    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops);

    mindspore::lite::MindIR_Model_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Get supported operation failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsFloat16PrecisionSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPerformanceModeSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query performance mode supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPrioritySupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query priority supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsDynamicInputSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query dynamic input supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsModelCacheSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query cache model supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

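// Compiles a LiteGraph on the device. The flow mirrors GetSupportedOperation:
// stage const tensors in a shared buffer, convert the graph to an HDI model,
// call the driver, then destroy the temporary model and release the buffer.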
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
    const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot prepare model.");
        return OH_NN_INVALID_PARAMETER;
    }

    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    V1_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = TransPerformanceMode(config.mode);
    iModelConfig.priority = TransPriority(config.priority);
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;

    auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel);

    mindspore::lite::MindIR_Model_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) {
        LOGE("Prepare model failed. ErrorCode=%d", preparedRet);
        return OH_NN_FAILED;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModelV1_0>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model failed: unable to create preparedModel instance.");
        return OH_NN_MEMORY_ERROR;
    }

    return OH_NN_SUCCESS;
}

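// Preparing a model from a raw meta graph is not part of the V1.0 HDI,
// so this overload is rejected unconditionally.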
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(const void* metaGraph,
                                             const Buffer& quantBuffer,
                                             const ModelConfig& config,
                                             std::shared_ptr<PreparedModel>& preparedModel)
{
    return OH_NN_OPERATION_FORBIDDEN;
}

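// Restores a prepared model from previously exported cache buffers. Each cache
// entry must be a buffer tracked by MemoryManager so that its fd can be handed
// to the driver as a SharedBuffer.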
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector<Buffer>& modelCache,
    const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel)
{
    std::vector<V1_0::SharedBuffer> iBuffers;
    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    OH_NN_ReturnCode ret;
    size_t modelCacheSize = modelCache.size();
    for (size_t i = 0; i < modelCacheSize; i++) {
        ret = memManager->GetMemory(modelCache[i].data, memory);
        if (ret != OH_NN_SUCCESS) {
            LOGE("The %zu-th model cache is invalid. Please provide a valid model cache.", i + 1);
            return ret;
        }
        iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length});
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = TransPerformanceMode(config.mode);
    iModelConfig.priority = TransPriority(config.priority);

    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel);
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVALIDABLE_DEVICE;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModelV1_0>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model from model cache failed: unable to create preparedModel instance.");
        return OH_NN_MEMORY_ERROR;
    }
    return OH_NN_SUCCESS;
}

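// Allocates driver shared memory of the given length and maps it into the
// caller's address space; returns nullptr on any failure.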
void* HDIDeviceV1_0::AllocateBuffer(size_t length)
{
    if (length == 0) {
        LOGE("The length param is invalid, length=0");
        return nullptr;
    }

    V1_0::SharedBuffer buffer;
    auto ret = m_iDevice->AllocateBuffer(length, buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Allocate buffer error. ErrorCode: %d", ret);
        return nullptr;
    }

    auto memManager = MemoryManager::GetInstance();
    auto addr = memManager->MapMemory(buffer.fd, length);
    if (addr == nullptr) {
        LOGE("Map fd to address failed.");
    }
    return addr;
}

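// V1.0 has no tensor-specific allocation, so fall back to a plain buffer;
// the tensor parameter is intentionally unused.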
void* HDIDeviceV1_0::AllocateTensorBuffer(size_t length, std::shared_ptr<NNTensor> tensor)
{
    return AllocateBuffer(length);
}

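// Releases a buffer obtained from AllocateBuffer: free the driver-side shared
// memory first, then remove the local mapping.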
OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer)
{
    if (buffer == nullptr) {
        LOGE("Buffer is nullptr, no need to release.");
        return OH_NN_INVALID_PARAMETER;
    }

    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    auto ret = memManager->GetMemory(buffer, memory);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Invalid buffer, it is not an NNRt buffer.");
        return ret;
    }

    V1_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length};
    auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer);
    if (deviceResult != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode: %d", deviceResult);
        return OH_NN_FAILED;
    }

    ret = memManager->UnMapMemory(buffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Unmap memory failed.");
        return ret;
    }

    return OH_NN_SUCCESS;
}

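// Releases a driver-side SharedBuffer. A buffer with INVALID_FD means nothing
// was allocated, so it is treated as a successful no-op.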
OH_NN_ReturnCode HDIDeviceV1_0::ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer)
{
    if (buffer.fd == INVALID_FD) {
        LOGI("No need to release. fd=%d", INVALID_FD);
        return OH_NN_SUCCESS;
    }

    auto ret = m_iDevice->ReleaseBuffer(buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode=%d", ret);
        return OH_NN_FAILED;
    }
    return OH_NN_SUCCESS;
}

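// Offline (pre-compiled) models are not supported by the V1.0 HDI.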
OH_NN_ReturnCode HDIDeviceV1_0::PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
                                                    const ModelConfig& config,
                                                    std::shared_ptr<PreparedModel>& preparedModel)
{
    LOGE("HDIDeviceV1.0 does not support PrepareOfflineModel.");
    return OH_NN_OPERATION_FORBIDDEN;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS