/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <fstream>

#include "nncore_utils.h"
#include "nncore_const.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Test {
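// Wrap a std::vector<uint32_t> in an OH_NN_UInt32Array view. The array does not
// own the data, so the vector must outlive every use of the returned struct.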
OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t>& vector)
{
    uint32_t* data = (vector.empty()) ? nullptr : const_cast<uint32_t*>(vector.data());
    return {data, vector.size()};
}

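// Create an NN_TensorDesc and configure its data type, shape (when one is supplied)
// and format. Returns nullptr if creation or any setter fails.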
NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format)
{
    NN_TensorDesc* tensorDescTmp = OH_NNTensorDesc_Create();
    if (tensorDescTmp == nullptr) {
        LOGE("[NNRtTest]OH_NNTensorDesc_Create failed!");
        return nullptr;
    }

    OH_NN_ReturnCode ret = OH_NNTensorDesc_SetDataType(tensorDescTmp, dataType);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest]OH_NNTensorDesc_SetDataType failed!ret = %d\n", ret);
        OH_NNTensorDesc_Destroy(&tensorDescTmp); // release the descriptor instead of leaking it
        return nullptr;
    }

    if (shape != nullptr) {
        ret = OH_NNTensorDesc_SetShape(tensorDescTmp, shape, shapeNum);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret);
            OH_NNTensorDesc_Destroy(&tensorDescTmp);
            return nullptr;
        }
    }

    ret = OH_NNTensorDesc_SetFormat(tensorDescTmp, format);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest]OH_NNTensorDesc_SetFormat failed!ret = %d\n", ret);
        OH_NNTensorDesc_Destroy(&tensorDescTmp);
        return nullptr;
    }

    return tensorDescTmp;
}

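// Final build steps shared by the multi-op graph: declare graph inputs/outputs and finish the model.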
int MultiModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
{
    int ret = 0;
    auto graphInputs = TransformUInt32Array(graphArgs.graphInput);
    auto graphOutputs = TransformUInt32Array(graphArgs.graphOutput);
    ret = OH_NNModel_SpecifyInputsAndOutputs(model, &graphInputs, &graphOutputs);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret);
        return ret;
    }
    ret = OH_NNModel_Finish(model);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret);
        return ret;
    }
    return ret;
}

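// Build a graph containing several operations. Tensor indices are model-global, so
// opCnt tracks the running tensor count across operations while i only indexes the
// operands of the current operation.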
int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
{
    int ret = 0;
    int opCnt = 0; // model-global tensor index
    for (size_t j = 0; j < graphArgs.operationTypes.size(); j++) {
        for (size_t i = 0; i < graphArgs.operands[j].size(); i++) {
            const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
            NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(),
                                                         (uint32_t) operandTem.shape.size(),
                                                         operandTem.dataType, operandTem.format);
            ret = OH_NNModel_AddTensorToModel(model, tensorDesc);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_AddTensorToModel failed! ret=%d\n", ret);
                return ret;
            }
            ret = OH_NNModel_SetTensorType(model, opCnt, operandTem.type);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_SetTensorType failed! ret=%d\n", ret);
                return ret;
            }
            if (std::find(graphArgs.paramIndices[j].begin(), graphArgs.paramIndices[j].end(), opCnt) !=
                graphArgs.paramIndices[j].end()) {
                ret = OH_NNModel_SetTensorData(model, opCnt, operandTem.data, operandTem.length);
                if (ret != OH_NN_SUCCESS) {
                    LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret);
                    return ret;
                }
            }
            opCnt += 1; // advance for every tensor, not only parameter tensors
        }
        auto paramIndices = TransformUInt32Array(graphArgs.paramIndices[j]);
        auto inputIndices = TransformUInt32Array(graphArgs.inputIndices[j]);
        auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]);
        ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], &paramIndices, &inputIndices,
                                      &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
            return ret;
        }
    }
    ret = MultiModelBuildEndStep(model, graphArgs);
    return ret;
}

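// Final build steps for a single-op graph; each stage is gated by a flag in
// graphArgs so tests can stop the build at any point.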
int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
{
    int ret = 0;
    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
    if (graphArgs.addOperation) {
        ret = OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices,
                                      &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
            return ret;
        }
    }
    if (graphArgs.specifyIO) {
        ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret);
            return ret;
        }
    }
    if (graphArgs.build) {
        ret = OH_NNModel_Finish(model);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret);
            return ret;
        }
    }
    return ret;
}

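// Add every operand of a single operation to the model, upload constant data for
// the parameter tensors, then run the shared end steps.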
int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
{
    int ret = 0;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operandTem = graphArgs.operands[i];
        NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(),
                                                     (uint32_t) operandTem.shape.size(),
                                                     operandTem.dataType, operandTem.format);
        ret = OH_NNModel_AddTensorToModel(model, tensorDesc);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddTensorToModel failed! ret=%d\n", ret);
            return ret;
        }
        ret = OH_NNModel_SetTensorType(model, i, operandTem.type);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_SetTensorType failed! ret=%d\n", ret);
            return ret;
        }
        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
            graphArgs.paramIndices.end()) {
            ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret);
                return ret;
            }
        }
    }
    ret = SingleModelBuildEndStep(model, graphArgs);
    return ret;
}

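// Same as BuildSingleOpGraph, but additionally attaches an 8-bit quantization
// parameter (scale 0.2, zero point 0) to every tensor before setting its type.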
int BuildSingleOpGraphWithQuantParams(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
{
    int ret = 0;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operandTem = graphArgs.operands[i];
        NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(),
                                                     (uint32_t) operandTem.shape.size(),
                                                     operandTem.dataType, operandTem.format);
        ret = OH_NNModel_AddTensorToModel(model, tensorDesc);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddTensorToModel failed! ret=%d\n", ret);
            return ret;
        }

        NN_QuantParam* quantParam = OH_NNQuantParam_Create();
        double scales = 0.2;
        int32_t zeroPoints = 0;
        uint32_t numBits = 8;
        // Check every quant-param call instead of overwriting ret and losing earlier failures.
        if (OH_NNQuantParam_SetScales(quantParam, &scales, 1) != OH_NN_SUCCESS ||
            OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1) != OH_NN_SUCCESS ||
            OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1) != OH_NN_SUCCESS ||
            OH_NNModel_SetTensorQuantParams(model, i, quantParam) != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] Setting tensor quant params failed!\n");
            OH_NNQuantParam_Destroy(&quantParam);
            return OH_NN_FAILED;
        }
        ret = OH_NNQuantParam_Destroy(&quantParam);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNQuantParam_Destroy failed! ret=%d\n", ret);
            return ret;
        }
        ret = OH_NNModel_SetTensorType(model, i, operandTem.type);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_SetTensorType failed! ret=%d\n", ret);
            return ret;
        }
        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
            graphArgs.paramIndices.end()) {
            ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret);
                return ret;
            }
        }
    }
    ret = SingleModelBuildEndStep(model, graphArgs);
    return ret;
}

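// Look up the device ID of the test vendor CPU device by name.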
OH_NN_ReturnCode GetDeviceID(size_t *deviceId)
{
    OH_NN_ReturnCode ret = OH_NN_FAILED;
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret);
        return ret;
    }
    if (devicesCount <= NO_DEVICE_COUNT) {
        LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%u\n", devicesCount);
        return OH_NN_FAILED;
    }

    const char *name = nullptr;
    std::string deviceName{"Device-CPU_TestVendor_v2_0"};
    for (uint32_t i = 0; i < devicesCount; i++) {
        name = nullptr;
        ret = OH_NNDevice_GetName(devicesID[i], &name);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret);
            return ret;
        }

        std::string sName(name);
        if (deviceName == sName) {
            *deviceId = devicesID[i];
            return OH_NN_SUCCESS;
        }
    }
    return OH_NN_FAILED;
}

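// Bind the compilation to the test vendor CPU device; mirrors the lookup in GetDeviceID.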
OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation)
{
    OH_NN_ReturnCode ret = OH_NN_FAILED;
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret);
        return ret;
    }
    if (devicesCount <= NO_DEVICE_COUNT) {
        LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%u\n", devicesCount);
        return OH_NN_FAILED;
    }

    const char *name = nullptr;
    std::string deviceName{"Device-CPU_TestVendor_v2_0"};
    for (uint32_t i = 0; i < devicesCount; i++) {
        name = nullptr;
        ret = OH_NNDevice_GetName(devicesID[i], &name);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret);
            return ret;
        }

        std::string sName(name);
        if (deviceName == sName) {
            ret = OH_NNCompilation_SetDevice(compilation, devicesID[i]);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret);
                return ret;
            }
            return OH_NN_SUCCESS;
        }
    }
    return OH_NN_FAILED;
}

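// Apply the optional compile options from compileParam (cache, performance mode,
// priority, fp16), then build the compilation.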
int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam)
{
    int ret = 0;
    ret = SetDevice(compilation);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] SetDevice failed! ret=%d\n", ret);
        return ret;
    }
    // set cache
    if (!compileParam.cacheDir.empty()) {
        ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(),
                                        compileParam.cacheVersion);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret);
            return ret;
        }
    }
    // set performance
    if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) {
        ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! ret=%d\n", ret);
            return ret;
        }
    }
    // set priority
    if (compileParam.priority != OH_NN_PRIORITY_NONE) {
        ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret);
            return ret;
        }
    }
    // enable fp16
    if (compileParam.enableFp16) {
        ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! ret=%d\n", ret);
            return ret;
        }
    }
    // build
    ret = OH_NNCompilation_Build(compilation);
    return ret;
}

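// Destroy whichever of the three handles are non-null and assert that each
// destroy call resets the pointer.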
void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor)
{
    if (model != nullptr) {
        OH_NNModel_Destroy(&model);
        ASSERT_EQ(nullptr, model);
    }
    if (compilation != nullptr) {
        OH_NNCompilation_Destroy(&compilation);
        ASSERT_EQ(nullptr, compilation);
    }
    if (executor != nullptr) {
        OH_NNExecutor_Destroy(&executor);
        ASSERT_EQ(nullptr, executor);
    }
}

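// Classify a path as directory, regular file, unknown, or not found.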
PathType CheckPath(const std::string &path)
{
    if (path.empty()) {
        LOGI("CheckPath: path is null");
        return PathType::NOT_FOUND;
    }
    struct stat buf{};
    if (stat(path.c_str(), &buf) == 0) {
        // Use the S_IS* macros: testing st_mode against a raw S_IF* mask can
        // misclassify other file types that share mask bits.
        if (S_ISDIR(buf.st_mode)) {
            return PathType::DIR;
        } else if (S_ISREG(buf.st_mode)) {
            return PathType::FILE;
        } else {
            return PathType::UNKNOWN;
        }
    }
    LOGI("%s not found", path.c_str());
    return PathType::NOT_FOUND;
}

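// Delete a single file; a missing file counts as success.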
bool DeleteFile(const std::string &path)
{
    if (path.empty()) {
        LOGI("DeleteFile: path is null");
        return false;
    }
    if (CheckPath(path) == PathType::NOT_FOUND) {
        LOGI("not found: %s", path.c_str());
        return true;
    }
    if (remove(path.c_str()) == 0) {
        LOGI("deleted: %s", path.c_str());
        return true;
    }
    LOGI("delete failed: %s", path.c_str());
    return false;
}

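// Byte-for-byte copy via stream buffers; callers are expected to pass valid
// paths, as open failures are not reported.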
void CopyFile(const std::string &srcPath, const std::string &dstPath)
{
    std::ifstream src(srcPath, std::ios::binary);
    std::ofstream dst(dstPath, std::ios::binary);

    dst << src.rdbuf();
}

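// Join two path segments, inserting '/' only when str1 lacks a trailing
// separator, e.g. ConcatPath("/data", "cache") == "/data/cache".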
std::string ConcatPath(const std::string &str1, const std::string &str2)
{
    // boundary
    if (str2.empty()) {
        return str1;
    }
    if (str1.empty()) {
        return str2;
    }
    // concat
    char end = str1[str1.size() - 1];
    if (end == '\\' || end == '/') {
        return str1 + str2;
    } else {
        return str1 + '/' + str2;
    }
}

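// Recursively delete a directory tree: descend into subdirectories first,
// collect plain files, then remove the files and finally the directory itself.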
void DeleteFolder(const std::string &path)
{
    if (path.empty()) {
        LOGI("DeletePath: path is null");
        return;
    }

    DIR *dir = opendir(path.c_str());
    // check that the path is an openable directory
    if (dir == nullptr) {
        LOGE("[NNRtTest] Can not open dir. Check path or permission! path: %s", path.c_str());
        return;
    }
    struct dirent *file;
    // read all the files in dir
    std::vector<std::string> pathList;
    while ((file = readdir(dir)) != nullptr) {
        // skip "." and ".."
        if (strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0) {
            continue;
        }
        if (file->d_type == DT_DIR) {
            std::string filePath = path + "/" + file->d_name;
            DeleteFolder(filePath); // recurse into the subdirectory
        } else {
            pathList.emplace_back(ConcatPath(path, file->d_name));
        }
    }
    closedir(dir);
    pathList.emplace_back(path);
    LOGI("[Common] Delete folder %s", path.c_str());
    for (auto &i : pathList) {
        DeleteFile(i);
    }
}

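// Create the directory and any missing parents (mode 0700), then verify the result.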
bool CreateFolder(const std::string &path)
{
    if (path.empty()) {
        LOGI("CreateFolder: path is empty");
        return false;
    }
    LOGI("CreateFolder:%s", path.c_str());
    mode_t mode = 0700;
    for (size_t i = 1; i < path.size() - 1; i++) {
        if (path[i] != '/') {
            continue;
        }
        PathType ret = CheckPath(path.substr(0, i));
        switch (ret) {
            case PathType::DIR:
                continue;
            case PathType::NOT_FOUND:
                LOGI("mkdir: %s", path.substr(0, i).c_str());
                mkdir(path.substr(0, i).c_str(), mode);
                break;
            default:
                LOGI("error: %s", path.substr(0, i).c_str());
                return false;
        }
    }
    mkdir(path.c_str(), mode);
    return CheckPath(path) == PathType::DIR;
}

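// Compare ELEMENT_COUNT floats against expected values with a 1e-8 absolute
// tolerance; on the first mismatch, dump every element for debugging.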
bool CheckOutput(const float* output, const float* expect)
{
    if (output == nullptr || expect == nullptr) {
        LOGE("[NNRtTest] output or expect is nullptr\n");
        return false;
    }
    for (int i = 0; i < ELEMENT_COUNT; i++) {
        if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) {
            for (int j = 0; j < ELEMENT_COUNT; j++) {
                LOGE("[NNRtTest] output %d not match: expect:%f, actual:%f\n", j, float(expect[j]), float(output[j]));
            }
            return false;
        }
    }
    return true;
}

// Create a fixed-shape Add model
void ConstructAddModel(OH_NNModel **model)
{
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model); // check the constructed model, not the out-parameter itself
    AddModel addModel;
    OHNNGraphArgs graphArgs = addModel.graphArgs;
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs));
}

// Create a compilation from the fixed-shape model
void ConstructCompilation(OH_NNCompilation **compilation)
{
    // TODO: decide when the model should be released
    OH_NNModel* model = nullptr;
    ConstructAddModel(&model);
    *compilation = OH_NNCompilation_Construct(model);
    ASSERT_NE(nullptr, *compilation);
}

// Create an executor from the fixed-shape compilation
void CreateExecutor(OH_NNExecutor **executor)
{
    OH_NNCompilation *compilation = nullptr;
    ConstructCompilation(&compilation);
    OHNNCompileParam compileParam{
        .performanceMode = OH_NN_PERFORMANCE_HIGH,
        .priority = OH_NN_PRIORITY_HIGH,
    };
    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
    *executor = OH_NNExecutor_Construct(compilation);
    ASSERT_NE(nullptr, *executor);
    OH_NNCompilation_Destroy(&compilation);
}

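// Build a dynamic-shape AvgPool model and create an executor from it; unlike
// CreateExecutor, the model handle is destroyed here once compilation is done.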
void CreateDynamicExecutor(OH_NNExecutor **executor)
{
    OH_NNModel *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, model);
    AvgPoolDynamicModel avgModel;
    OHNNGraphArgs graphArgs = avgModel.graphArgs;
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));

    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
    ASSERT_NE(nullptr, compilation);

    OHNNCompileParam compileParam{
        .performanceMode = OH_NN_PERFORMANCE_HIGH,
        .priority = OH_NN_PRIORITY_HIGH,
    };
    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
    *executor = OH_NNExecutor_Construct(compilation);
    ASSERT_NE(nullptr, *executor);
    OH_NNModel_Destroy(&model);
    OH_NNCompilation_Destroy(&compilation);
}

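// Query the executor's input/output counts and create a tensor descriptor for each index.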
void GetExecutorInputOutputTensorDesc(OH_NNExecutor* executor,
    std::vector<NN_TensorDesc*>& inputTensorDescs, size_t& inputCount,
    std::vector<NN_TensorDesc*>& outputTensorDescs, size_t& outputCount)
{
    OH_NN_ReturnCode ret = OH_NNExecutor_GetInputCount(executor, &inputCount);
    ASSERT_EQ(OH_NN_SUCCESS, ret);
    NN_TensorDesc* tensorDescTmp = nullptr;
    for (size_t i = 0; i < inputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDescTmp);
        inputTensorDescs.emplace_back(tensorDescTmp);
    }

    ret = OH_NNExecutor_GetOutputCount(executor, &outputCount);
    ASSERT_EQ(OH_NN_SUCCESS, ret);
    for (size_t i = 0; i < outputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDescTmp);
        outputTensorDescs.emplace_back(tensorDescTmp);
    }
}

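// Allocate device tensors for the supplied input/output descriptors on the test device.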
void GetExecutorInputOutputTensorByDesc(OH_NNExecutor* executor,
    std::vector<NN_Tensor*>& inputTensors, const std::vector<NN_TensorDesc*>& inputTensorDescs,
    std::vector<NN_Tensor*>& outputTensors, const std::vector<NN_TensorDesc*>& outputTensorDescs)
{
    size_t deviceID = 0;
    if (OH_NN_SUCCESS != GetDeviceID(&deviceID)) {
        LOGE("Get deviceid failed.");
        return;
    }
    NN_Tensor* tensor = nullptr;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        tensor = OH_NNTensor_Create(deviceID, inputTensorDescs[i]);
        ASSERT_NE(nullptr, tensor);
        inputTensors.emplace_back(tensor);
    }

    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        tensor = OH_NNTensor_Create(deviceID, outputTensorDescs[i]);
        ASSERT_NE(nullptr, tensor);
        outputTensors.emplace_back(tensor);
    }
}

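// Convenience wrapper: create descriptors, allocate the matching device tensors,
// then destroy the temporary descriptors.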
void GetExecutorInputOutputTensor(OH_NNExecutor* executor, std::vector<NN_Tensor*>& inputTensors, size_t& inputCount,
    std::vector<NN_Tensor*>& outputTensors, size_t& outputCount)
{
    std::vector<NN_TensorDesc*> inputTensorDescs;
    std::vector<NN_TensorDesc*> outputTensorDescs;
    OH_NN_ReturnCode ret = OH_NNExecutor_GetInputCount(executor, &inputCount);
    ASSERT_EQ(OH_NN_SUCCESS, ret);
    NN_TensorDesc* tensorDescTmp = nullptr;
    for (size_t i = 0; i < inputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDescTmp);
        inputTensorDescs.emplace_back(tensorDescTmp);
    }

    ret = OH_NNExecutor_GetOutputCount(executor, &outputCount);
    ASSERT_EQ(OH_NN_SUCCESS, ret);
    for (size_t i = 0; i < outputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDescTmp);
        outputTensorDescs.emplace_back(tensorDescTmp);
    }

    size_t deviceID = 0;
    if (OH_NN_SUCCESS != GetDeviceID(&deviceID)) {
        LOGE("Get deviceid failed.");
        return;
    }
    NN_Tensor* tensor = nullptr;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        tensor = OH_NNTensor_Create(deviceID, inputTensorDescs[i]);
        ASSERT_NE(nullptr, tensor);
        inputTensors.emplace_back(tensor);
    }

    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        tensor = OH_NNTensor_Create(deviceID, outputTensorDescs[i]);
        ASSERT_NE(nullptr, tensor);
        outputTensors.emplace_back(tensor);
    }

    DestroyTensorDesc(inputTensorDescs, outputTensorDescs);
}

OH_NN_ReturnCode DestroyTensorDesc(
    std::vector<NN_TensorDesc*>& inputTensorDescs, std::vector<NN_TensorDesc*>& outputTensorDescs)
{
    // Destroy the input and output tensor descriptors
    OH_NN_ReturnCode returnCode {OH_NN_FAILED};
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        returnCode = OH_NNTensorDesc_Destroy(&inputTensorDescs[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed.");
            return returnCode;
        }
    }
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        returnCode = OH_NNTensorDesc_Destroy(&outputTensorDescs[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed.");
            return returnCode;
        }
    }

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode DestroyTensor(
    std::vector<NN_Tensor*>& inputTensors, std::vector<NN_Tensor*>& outputTensors)
{
    // Clean up the input and output tensors
    OH_NN_ReturnCode returnCode {OH_NN_FAILED};
    for (size_t i = 0; i < inputTensors.size(); ++i) {
        returnCode = OH_NNTensor_Destroy(&inputTensors[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensor_Destroy failed.");
            return returnCode;
        }
    }
    for (size_t i = 0; i < outputTensors.size(); ++i) {
        returnCode = OH_NNTensor_Destroy(&outputTensors[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensor_Destroy failed.");
            return returnCode;
        }
    }

    return OH_NN_SUCCESS;
}
} // namespace Test
} // namespace NeuralNetworkRuntime
} // namespace OHOS