/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cmath>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <algorithm>
#include <v1_0/nnrt_types.h>

#include "securec.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/inner_model.h"
#include "frameworks/native/memory_manager.h"
#include "hdi_nnrt_test_utils.h"

namespace OHOS::NeuralNetworkRuntime::Test {

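// Builds a minimal single-operator model: a float32 Add with two 3 x 2 x 2 inputs,
// a fused-activation parameter tensor, and one output. Every NNRt call is wrapped
// in an ASSERT, so any failure aborts the calling test. A typical call from a test
// (a sketch; real tests go on to prepare and run the model through the HDI device):
//     OH_NNModel *model = nullptr;
//     HDICommon::BuildAddGraph(&model);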
void HDICommon::BuildAddGraph(OH_NNModel **model)
{
    printf("[NNRtTest] BuildAddGraph start.\n");
    // create OH_NNModel
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model);
    // add inputs of Add operation
    int32_t dimensions[3]{3, 2, 2};
    OH_NN_Tensor firstAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    OH_NN_Tensor secondAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    uint32_t inputIndicesValue[2]{0, 1};
    OH_NN_UInt32Array inputIndices{inputIndicesValue, 2};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &firstAddend));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &secondAddend));

    // add activation type and set value
    OH_NN_Tensor activationType{OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
    int8_t activationValue{0};
    uint32_t paramIndicesValue{2};
    OH_NN_UInt32Array paramIndices{&paramIndicesValue, 1};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &activationType));
    int opCnt = 2;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorData(*model, opCnt, (void *)&activationValue, sizeof(int8_t)));

    // add output of Add operation
    OH_NN_Tensor output{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    uint32_t outputIndicesValue{3};
    OH_NN_UInt32Array outputIndices{&outputIndicesValue, 1};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &output));

    // add Add operation to model
    ASSERT_EQ(OH_NN_SUCCESS,
        OH_NNModel_AddOperation(*model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SpecifyInputsAndOutputs(*model, &inputIndices, &outputIndices));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(*model));

    printf("[NNRtTest] BuildAddGraph done.\n");
}

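// Builds the same single-Add model as BuildAddGraph, but with every input
// dimension set to -1 so shapes are left dynamic and resolved at execution time.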
void HDICommon::BuildAddGraphDynamic(OH_NNModel **model)
{
    printf("[NNRtTest] BuildAddGraphDynamic start.\n");
    // create OH_NNModel
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model);
    // add inputs of Add operation
    int32_t dimensions[3]{-1, -1, -1};
    OH_NN_Tensor firstAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    OH_NN_Tensor secondAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    uint32_t inputIndicesValue[2]{0, 1};
    OH_NN_UInt32Array inputIndices{inputIndicesValue, 2};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &firstAddend));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &secondAddend));

    // add activation type and set value
    OH_NN_Tensor activationType{OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
    int8_t activationValue{OH_NN_FUSED_NONE};
    uint32_t paramIndicesValue{2};
    OH_NN_UInt32Array paramIndices{&paramIndicesValue, 1};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &activationType));
    int opCnt = 2;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorData(*model, opCnt, (void *)&activationValue, sizeof(int8_t)));

    // add output of Add operation
    OH_NN_Tensor output{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
    uint32_t outputIndicesValue{3};
    OH_NN_UInt32Array outputIndices{&outputIndicesValue, 1};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &output));

    // add Add operation to model
    ASSERT_EQ(OH_NN_SUCCESS,
        OH_NNModel_AddOperation(*model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SpecifyInputsAndOutputs(*model, &inputIndices, &outputIndices));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(*model));

    printf("[NNRtTest] BuildAddGraphDynamic done.\n");
}

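// Converts a finished OH_NNModel into the HDI V1_0::Model representation that
// INnrtDevice::PrepareModel expects. When the graph owns const tensors, a shared
// buffer of the required size is allocated on the device; on success the frontend
// OH_NNModel is destroyed, and on failure the buffer is released again. A sketch
// of the expected call pattern, assuming `device` and a finished `model`:
//     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
//     V1_0::Model *iModel = nullptr;
//     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device, model, tensorBuffer, &iModel));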
OH_NN_ReturnCode HDICommon::ConvertModel(OHOS::sptr<V1_0::INnrtDevice> device_, OH_NNModel *model,
    V1_0::SharedBuffer &tensorBuffer, V1_0::Model **iModel)
{
    printf("[NNRtTest] [ConvertModel] convert OH_NNModel to V1_0::Model\n");
    auto *innerModel = reinterpret_cast<InnerModel *>(model);
    std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph = innerModel->GetLiteGraphs();
    if (m_liteGraph == nullptr) {
        printf("[NNRtTest] [ConvertModel] lite graph is nullptr, cannot convert model.\n");
        return OH_NN_NULL_PTR;
    }

    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(m_liteGraph.get());
    std::cout << "[ConvertModel] const tensor size: " << tensorSize << std::endl;
    int32_t hdiRet{0};
    if (tensorSize > 0) {
        hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == NNRT_INVALID_FD) {
            printf("[NNRtTest] [ConvertModel] allocate tensor buffer failed after getting const tensor size, "
                "ret:%d\n", hdiRet);
            return OH_NN_FAILED;
        }
    }
    *iModel = mindspore::lite::MindIR_LiteGraph_To_Model(m_liteGraph.get(), tensorBuffer);
    if (*iModel == nullptr) {
        printf("[NNRtTest] [ConvertModel] parse lite graph to HDI model failed.\n");
        device_->ReleaseBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }
    // release model
    OH_NNModel_Destroy(&model);
    model = nullptr;
    printf("[NNRtTest] [ConvertModel] convert model done\n");
    return OH_NN_SUCCESS;
}

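// Allocates a device shared buffer of ADDEND_BUFFER_LENGTH bytes and wraps it in a
// float32 NHWC IOTensor with the default TENSOR_DIMS shape; allocation failures are
// logged, and the (invalid) tensor is still returned for the caller's asserts.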
V1_0::IOTensor HDICommon::CreateIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device)
{
    V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
    int ret = device->AllocateBuffer(ADDEND_BUFFER_LENGTH, buffer);
    if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
        printf("[NNRtTest] [CreateIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
    }
    V1_0::IOTensor tensor{.name = "tensor",
                          .dataType = V1_0::DATA_TYPE_FLOAT32,
                          .dimensions = TENSOR_DIMS,
                          .format = V1_0::FORMAT_NHWC,
                          .data = buffer};
    return tensor;
}

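// Allocates a device shared buffer of `length` bytes, maps it into the process and
// copies `data` into it, then wraps the buffer in a float32 {3, 2, 2} input IOTensor.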
V1_0::IOTensor HDICommon::CreateInputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device, size_t length, float *data)
{
    if (length == 0) {
        printf("[NNRtTest] [CreateInputIOTensor] the length param is invalid, length=0\n");
    }

    V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
    auto ret = device->AllocateBuffer(length, buffer);

    if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
        printf("[NNRtTest] [CreateInputIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
    }

    auto memManager = MemoryManager::GetInstance();
    auto memAddress = memManager->MapMemory(buffer.fd, length);
    if (memAddress == nullptr) {
        printf("[NNRtTest] [CreateInputIOTensor] map fd to address failed.\n");
    } else if (memcpy_s(memAddress, length, data, length) != EOK) {
        printf("[NNRtTest] [CreateInputIOTensor] copy input data failed.\n");
    }

    V1_0::IOTensor tensor{.name = "tensor",
                          .dataType = V1_0::DATA_TYPE_FLOAT32,
                          .dimensions = {3, 2, 2},
                          .format = V1_0::FORMAT_NHWC,
                          .data = buffer};
    return tensor;
}

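// Allocates an uninitialized device shared buffer of `length` bytes to receive an
// operation output and wraps it in a float32 {3, 2, 2} IOTensor.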
V1_0::IOTensor HDICommon::CreateOutputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device, size_t length)
{
    if (length == 0) {
        printf("[NNRtTest] [CreateOutputIOTensor] the length param is invalid, length=0\n");
    }

    V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
    int ret = device->AllocateBuffer(length, buffer);

    if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
        printf("[NNRtTest] [CreateOutputIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
    }

    V1_0::IOTensor tensor{.name = "tensor",
                          .dataType = V1_0::DATA_TYPE_FLOAT32,
                          .dimensions = {3, 2, 2},
                          .format = V1_0::FORMAT_NHWC,
                          .data = buffer};
    return tensor;
}

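// Maps a shared-buffer fd into this process through MemoryManager; returns the
// mapped address, or nullptr on failure.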
void *HDICommon::MapMemory(int fd, size_t length)
{
    auto memManager = MemoryManager::GetInstance();
    auto memAddress = memManager->MapMemory(fd, length);
    if (memAddress == nullptr) {
        printf("[NNRtTest] [MapMemory] map fd to address failed.\n");
        return nullptr;
    }
    return memAddress;
}

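// Unmaps an address previously returned by MapMemory; failures are only logged.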
void HDICommon::UnmapMemory(float *buffer)
{
    auto memManager = MemoryManager::GetInstance();
    auto ret = memManager->UnMapMemory(buffer);
    if (ret != OH_NN_SUCCESS) {
        printf("[NNRtTest] [UnmapMemory] unmap memory failed. ret:%d.\n", ret);
    }
}

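// Copies `length` bytes from `data` into `buffer` with memcpy_s, logging (but not
// propagating) any failure.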
void HDICommon::SetData(float *buffer, size_t length, float *data)
{
    if (buffer == nullptr || data == nullptr) {
        printf("[NNRtTest] [SetData] buffer or data is nullptr\n");
        return;
    }
    int ret = memcpy_s(buffer, length, data, length);
    if (ret != EOK) {
        printf("[NNRtTest] [SetData] set data failed, error code: %d\n", ret);
    }
}

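// Releases the device shared buffer backing each tensor in `tensors`. Failures are
// logged per tensor and the loop continues so the remaining buffers are still freed.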
void HDICommon::ReleaseBufferOfTensors(OHOS::sptr<V1_0::INnrtDevice> &device, std::vector<V1_0::IOTensor> &tensors)
{
    if (device == nullptr) {
        printf("[NNRtTest] [ReleaseBufferOfTensors] device is nullptr.\n");
        return;
    }

    for (auto &tensor : tensors) {
        auto ret = device->ReleaseBuffer(tensor.data);
        if (ret != HDF_SUCCESS) {
            printf("[NNRtTest] [ReleaseBufferOfTensors] release buffer failed, fd:%d ret:%d.\n", tensor.data.fd, ret);
        }
    }
}

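// Unmaps every address in `buffers`; failures are logged and iteration continues.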
void HDICommon::UnmapAllMemory(std::vector<void *> &buffers)
{
    auto memManager = MemoryManager::GetInstance();
    for (auto buffer : buffers) {
        auto ret = memManager->UnMapMemory(buffer);
        if (ret != OH_NN_SUCCESS) {
            printf("[NNRtTest] [UnmapAllMemory] unmap memory failed, ret:%d.\n", ret);
        }
    }
}

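// Compares an output vector element-wise against expected values with an absolute
// tolerance of 1e-8; logs the first mismatch (or a size mismatch) and returns false.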
bool CheckExpectOutput(const std::vector<float> &output, const std::vector<float> &expect)
{
    if (output.empty() || expect.empty()) {
        printf("[NNRtTest] [CheckExpectOutput] output or expect is empty.\n");
        return false;
    }
    int outputSize = output.size();
    int expectSize = expect.size();
    if (outputSize != expectSize) {
        printf("[NNRtTest] [CheckExpectOutput] output size not match: expect:%d, actual:%d\n", expectSize, outputSize);
        return false;
    }
    for (int i = 0; i < outputSize; i++) {
        if (std::abs(output[i] - expect[i]) > 1e-8) {
            printf("[NNRtTest] [CheckExpectOutput] output %d not match: expect:%f, actual:%f\n", i, expect[i],
                output[i]);
            return false;
        }
    }
    return true;
}

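// Prints at most the first PRINT_NUM elements of `buffer` as a single log line.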
void PrintTensor(const float *buffer, size_t length)
{
    std::stringstream ss;
    size_t printNum = std::min(length, PRINT_NUM);
    for (size_t i = 0; i < printNum; i++) {
        ss << std::to_string(buffer[i]) << " ";
    }
    printf("[NNRtTest] [data] %s\n", ss.str().c_str());
}

} // namespace OHOS::NeuralNetworkRuntime::Test