1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
#include <utility>

#include "const.h"
#include "mock_idevice.h"
17
18 namespace OHOS {
19 namespace HDI {
20 namespace Nnrt {
21 namespace V1_0 {
22
Get(bool isStub)23 sptr<INnrtDevice> INnrtDevice::Get(bool isStub)
24 {
25 return INnrtDevice::Get("mock_device_service", isStub);
26 }
27
Get(const std::string & serviceName,bool isStub)28 sptr<INnrtDevice> INnrtDevice::Get(const std::string &serviceName, bool isStub)
29 {
30 if (isStub) {
31 return nullptr;
32 }
33 sptr<INnrtDevice> mockIDevice = sptr<MockIDevice>(MockIDevice::GetInstance());
34 return mockIDevice;
35 }
36
~MockIDevice()37 MockIDevice::~MockIDevice()
38 {
39 for (auto ash : m_ashmems) {
40 ash.second->UnmapAshmem();
41 ash.second->CloseAshmem();
42 }
43 }
44
GetInstance()45 MockIDevice *MockIDevice::GetInstance()
46 {
47 static MockIDevice iDevice;
48 return &iDevice;
49 }
50
SetFP16Supported(bool isSupported)51 void MockIDevice::SetFP16Supported(bool isSupported)
52 {
53 m_fp16 = isSupported;
54 }
55
SetPerformanceSupported(bool isSupported)56 void MockIDevice::SetPerformanceSupported(bool isSupported)
57 {
58 m_performance = isSupported;
59 }
60
SetPrioritySupported(bool isSupported)61 void MockIDevice::SetPrioritySupported(bool isSupported)
62 {
63 m_priority = isSupported;
64 }
65
SetModelCacheSupported(bool isSupported)66 void MockIDevice::SetModelCacheSupported(bool isSupported)
67 {
68 m_cache = isSupported;
69 }
70
SetOperationsSupported(std::vector<bool> isSupported)71 void MockIDevice::SetOperationsSupported(std::vector<bool> isSupported)
72 {
73 m_operations = isSupported;
74 }
75
SetDynamicInputSupported(bool isSupported)76 void MockIDevice::SetDynamicInputSupported(bool isSupported)
77 {
78 m_dynamic = isSupported;
79 }
80
GetDeviceName(std::string & name)81 int32_t MockIDevice::GetDeviceName(std::string& name)
82 {
83 name = "Device-CPU";
84 return HDF_SUCCESS;
85 }
86
GetVendorName(std::string & name)87 int32_t MockIDevice::GetVendorName(std::string& name)
88 {
89 name = "TestVendor";
90 return HDF_SUCCESS;
91 }
92
GetDeviceType(DeviceType & deviceType)93 int32_t MockIDevice::GetDeviceType(DeviceType& deviceType)
94 {
95 deviceType = DeviceType::CPU;
96 return HDF_SUCCESS;
97 }
98
GetDeviceStatus(DeviceStatus & status)99 int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status)
100 {
101 status = DeviceStatus::AVAILABLE;
102 return HDF_SUCCESS;
103 }
104
GetVersion(uint32_t & majorVersion,uint32_t & minorVersion)105 int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
106 {
107 majorVersion = 1;
108 minorVersion = 0;
109 return HDF_SUCCESS;
110 }
111
GetSupportedOperation(const Model & model,std::vector<bool> & ops)112 int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
113 {
114 ops = m_operations;
115 return HDF_SUCCESS;
116 }
117
IsFloat16PrecisionSupported(bool & isSupported)118 int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
119 {
120 isSupported = m_fp16;
121 return HDF_SUCCESS;
122 }
123
IsPerformanceModeSupported(bool & isSupported)124 int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
125 {
126 isSupported = m_performance;
127 return HDF_SUCCESS;
128 }
129
IsPrioritySupported(bool & isSupported)130 int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
131 {
132 isSupported = m_priority;
133 return HDF_SUCCESS;
134 }
135
IsDynamicInputSupported(bool & isSupported)136 int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
137 {
138 isSupported = m_dynamic;
139 return HDF_SUCCESS;
140 }
141
IsModelCacheSupported(bool & isSupported)142 int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
143 {
144 isSupported = m_cache;
145 return HDF_SUCCESS;
146 }
147
AllocateBuffer(uint32_t length,SharedBuffer & buffer)148 int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
149 {
150 std::lock_guard<std::mutex> lock(m_mtx);
151 sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
152 if (ashptr == nullptr) {
153 LOGE("[NNRtTest] Create shared memory failed.");
154 return HDF_FAILURE;
155 }
156
157 if (!ashptr->MapReadAndWriteAshmem()) {
158 LOGE("[NNRtTest] Map allocate buffer failed.");
159 return HDF_FAILURE;
160 }
161
162 buffer.fd = ashptr->GetAshmemFd();
163 buffer.bufferSize = ashptr->GetAshmemSize();
164 buffer.offset = 0;
165 buffer.dataSize = length;
166
167 m_ashmems[buffer.fd] = ashptr;
168 m_bufferFd = buffer.fd;
169 return HDF_SUCCESS;
170 }
171
ReleaseBuffer(const SharedBuffer & buffer)172 int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
173 {
174 std::lock_guard<std::mutex> lock(m_mtx);
175 auto ash = m_ashmems[buffer.fd];
176 ash->UnmapAshmem();
177 return HDF_SUCCESS;
178 }
179
MemoryCopy(float * data,uint32_t length)180 int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
181 {
182 std::lock_guard<std::mutex> lock(m_mtx);
183 auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
184 auto memAddress = memManager->MapMemory(m_bufferFd, length);
185 if (memAddress == nullptr) {
186 LOGE("[NNRtTest] Map fd to address failed.");
187 return HDF_FAILURE;
188 }
189 auto ret = memcpy_s(memAddress, length, data, length);
190 if (ret != EOK) {
191 LOGE("[NNRtTest] MockIDevice memory cop failed.");
192 return HDF_FAILURE;
193 }
194 return HDF_SUCCESS;
195 }
196
PrepareModel(const Model & model,const ModelConfig & config,sptr<IPreparedModel> & preparedModel)197 int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr<IPreparedModel>& preparedModel)
198 {
199 preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
200 return HDF_SUCCESS;
201 }
202
PrepareModelFromModelCache(const std::vector<SharedBuffer> & modelCache,const ModelConfig & config,sptr<IPreparedModel> & preparedModel)203 int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
204 sptr<IPreparedModel>& preparedModel)
205 {
206 preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
207 return HDF_SUCCESS;
208 }
209
ExportModelCache(std::vector<SharedBuffer> & modelCache)210 int32_t MockIPreparedModel::ExportModelCache(std::vector<SharedBuffer>& modelCache)
211 {
212 if (!modelCache.empty()) {
213 LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector.");
214 return HDF_ERR_INVALID_PARAM;
215 }
216 uint8_t buffer[4] = {0, 1, 2, 3};
217 uint32_t size = sizeof(buffer);
218 sptr<Ashmem> cache = Ashmem::CreateAshmem("cache", size);
219 if (cache == nullptr) {
220 LOGE("[NNRtTest] Create shared memory failed.");
221 return HDF_ERR_MALLOC_FAIL;
222 }
223 bool ret = cache->MapReadAndWriteAshmem();
224 if (!ret) {
225 LOGE("[NNRtTest] Map fd to write cache failed.");
226 return HDF_FAILURE;
227 }
228
229 ret = cache->WriteToAshmem(buffer, size, 0);
230 cache->UnmapAshmem();
231 if (!ret) {
232 LOGE("[NNRtTest] Write cache failed.");
233 return HDF_FAILURE;
234 }
235 // SharedBuffer: fd, bufferSize, offset, dataSize
236 modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()});
237 return HDF_SUCCESS;
238 }
239
GetVersion(uint32_t & majorVersion,uint32_t & minorVersion)240 int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
241 {
242 majorVersion = 1;
243 minorVersion = 0;
244 return HDF_SUCCESS;
245 }
246
Run(const std::vector<IOTensor> & inputs,const std::vector<IOTensor> & outputs,std::vector<std::vector<int32_t>> & outputsDims,std::vector<bool> & isOutputBufferEnough)247 int32_t MockIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
248 std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
249 {
250 outputsDims = {{1, 2, 2, 1}};
251 isOutputBufferEnough = {true};
252 return HDF_SUCCESS;
253 }
254
255 } // namespace V1_0
256 } // namespace Nnrt
257 } // namespace HDI
258 } // namespace OHOS
259