/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vector>

#include "nncore_utils.h"
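
// Helper routines used throughout these tests (CreateExecutor, CreateDynamicExecutor,
// GetExecutorInputOutputTensor, GetExecutorInputOutputTensorDesc, DestroyTensor and
// DestroyTensorDesc) are declared via nncore_utils.h.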

using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Test;

namespace OHOS::NeuralNetworkCore {
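// No-op callbacks; they only supply valid function pointers for the
// OH_NNExecutor_SetOnRunDone / OH_NNExecutor_SetOnServiceDied tests.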
void RunDone(void *pointer, OH_NN_ReturnCode returnCode, void* pointerArray[], int32_t intNum)
{
    return;
}

void ServiceDied(void* point)
{
    return;
}
class ExecutorTest : public testing::Test {
protected:
    OHNNCompileParam m_compileParam;
    AddModel addModel;
    OHNNGraphArgs graphArgs = addModel.graphArgs;
};

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Executor_0100
 * @tc.desc: compilation is null; construction returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Construct_Executor_0100, Function | MediumTest | Level1)
{
    OH_NNCompilation *compilation = nullptr;
    ASSERT_EQ(nullptr, OH_NNExecutor_Construct(compilation));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0100
 * @tc.desc: Destroy the executor repeatedly; the repeated destroy fails
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0100, Function | MediumTest | Level1)
{
    OH_NNExecutor *executor = nullptr;
    CreateExecutor(&executor);

    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(nullptr, executor);
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0200
 * @tc.desc: Normal destroy; verify the executor is reset to null
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0200, Function | MediumTest | Level1)
{
    OH_NNExecutor *executor = nullptr;
    CreateExecutor(&executor);

    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(nullptr, executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0100
 * @tc.desc: Set the OnRunDone callback with a null executor; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0100, Function | MediumTest | Level1)
{
    NN_OnRunDone onRunDone = RunDone;
    OH_NNExecutor *executor = nullptr;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOnRunDone(executor, onRunDone));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0200
 * @tc.desc: Set the OnRunDone callback with valid parameters; returns operation forbidden
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0200, Function | MediumTest | Level1)
{
    NN_OnRunDone onRunDone = RunDone;
    OH_NNExecutor *executor = nullptr;
    CreateExecutor(&executor);
    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_SetOnRunDone(executor, onRunDone));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0100
 * @tc.desc: Set the OnServiceDied callback with a null executor; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0100, Function | MediumTest | Level1)
{
    NN_OnServiceDied onServiceDied = ServiceDied;
    OH_NNExecutor *executor = nullptr;

    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOnServiceDied(executor, onServiceDied));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0200
 * @tc.desc: Set the OnServiceDied callback with valid parameters; returns operation forbidden
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0200, Function | MediumTest | Level1)
{
    NN_OnServiceDied onServiceDied = ServiceDied;
    OH_NNExecutor *executor = nullptr;
    CreateExecutor(&executor);

    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_SetOnServiceDied(executor, onServiceDied));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0100
 * @tc.desc: Synchronous run with a null (already destroyed) executor; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0100, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
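    // The executor and the tensors have already been released; RunSync must reject the null executor.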
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0200
 * @tc.desc: Synchronous run with a null inputTensor array; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0200, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, nullptr, inputCount,
        outputTensors.data(), outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0300
 * @tc.desc: Synchronous run with a null outputTensor array; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0300, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        nullptr, outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0400
 * @tc.desc: Synchronous run with inputCount set to 0; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0400, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    inputCount = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0500
 * @tc.desc: Synchronous run with outputCount set to 0; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0500, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    outputCount = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0600
 * @tc.desc: Synchronous run with fewer inputTensors than the model's input count; returns an error
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0600, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount - 1,
        outputTensors.data(), outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0700
 * @tc.desc: Synchronous run with more inputTensors than the model's input count; returns an error
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0700, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount + 1,
        outputTensors.data(), outputCount));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0800
 * @tc.desc: Synchronous run with fewer outputTensors than the model's output count; returns an error
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0800, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount - 1));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0900
 * @tc.desc: Synchronous run with more outputTensors than the model's output count; returns an error
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0900, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount + 1));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0100
 * @tc.desc: Asynchronous run with a null (already destroyed) executor; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0100, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;  // any non-null opaque pointer serves as user data
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
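    // The executor and the tensors have already been released; RunAsync must reject the null executor.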
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount, timeout, userData));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0200
 * @tc.desc: Asynchronous run with inputCount set to 0; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0200, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    inputCount = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount, timeout, userData));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0300
 * @tc.desc: Asynchronous run with outputCount set to 0; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0300, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    outputCount = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount, timeout, userData));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0400
 * @tc.desc: Asynchronous run with a null inputTensor array; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0400, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, nullptr, inputCount,
        outputTensors.data(), outputCount, timeout, userData));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0500
 * @tc.desc: Asynchronous run with a null outputTensor array; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0500, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount,
        nullptr, outputCount, timeout, userData));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0600
 * @tc.desc: Asynchronous run on a fixed-shape model; returns operation forbidden
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0600, Function | MediumTest | Level1)
{
    vector<NN_Tensor*> inputTensors, outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    int32_t timeout = 60;
    void* userData = (void*) executor;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount, timeout, userData));
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0100, Function | MediumTest | Level1)
{
    int32_t *outputDimensions = nullptr;
    uint32_t outputDimensionCount = 0;
    uint32_t outputIndex = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nullptr, outputIndex,
        &outputDimensions, &outputDimensionCount));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0200
 * @tc.desc: outputIndex is out of range (not less than the output count); returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0200, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    int32_t *outputDimensions = nullptr;
    uint32_t outputDimensionCount = 0;
    uint32_t addOutputIndex = 4;  // beyond the model's output count
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(executor, addOutputIndex,
        &outputDimensions, &outputDimensionCount));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0300
 * @tc.desc: Fixed-shape model runs successfully; getting the output shape succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0300, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    int32_t *outputDimensions = nullptr;
    uint32_t outputDimensionCount = 0;
    uint32_t addOutputIndex = 0;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(executor, addOutputIndex,
        &outputDimensions, &outputDimensionCount));

    // Destroy the executor
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0400
 * @tc.desc: Dynamic-shape model runs successfully; getting the output shape succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0400, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input shape to a valid value (the minimum of its dimension range)
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> minInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            minInputDimsT.emplace_back(static_cast<int32_t>(minInputDims[j]));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    int32_t *outputDimensions = nullptr;
    uint32_t outputDimensionCount = 0;
    uint32_t addOutputIndex = 0;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(executor, addOutputIndex,
        &outputDimensions, &outputDimensionCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0100, Function | MediumTest | Level1)
{
    size_t inputCount = ZERO;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputCount(nullptr, &inputCount));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0200
 * @tc.desc: inputCount pointer is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0200, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputCount(executor, nullptr));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0300
 * @tc.desc: Get the input count; returns success
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0300, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    size_t inputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount));
    ASSERT_LT(ZERO, inputCount);
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0100, Function | MediumTest | Level1)
{
    size_t outputCount = 0;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputCount(nullptr, &outputCount));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0200
 * @tc.desc: outputCount pointer is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0200, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputCount(executor, nullptr));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0300
 * @tc.desc: Get the output count; returns success
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0300, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    size_t outputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount));
    ASSERT_LT(ZERO, outputCount);
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0100,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    size_t index = ZERO;
    ASSERT_EQ(nullptr, OH_NNExecutor_CreateInputTensorDesc(nullptr, index));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0200
 * @tc.desc: Create an input tensorDesc for every index less than the input count; succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0200,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    size_t inputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount));
    NN_TensorDesc* tensorDesc = nullptr;
    for (size_t i = 0; i < inputCount; i++) {
        tensorDesc = OH_NNExecutor_CreateInputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDesc);
        OH_NNTensorDesc_Destroy(&tensorDesc);
    }
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0300
 * @tc.desc: index equals the input count; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0300,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    size_t inputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount));

    ASSERT_EQ(nullptr, OH_NNExecutor_CreateInputTensorDesc(executor, inputCount));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0100,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    size_t index = ZERO;
    ASSERT_EQ(nullptr, OH_NNExecutor_CreateOutputTensorDesc(nullptr, index));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0200
 * @tc.desc: Create an output tensorDesc for every index less than the output count; succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0200,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    size_t outputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount));
    NN_TensorDesc* tensorDesc = nullptr;
    for (size_t i = 0; i < outputCount; i++) {
        tensorDesc = OH_NNExecutor_CreateOutputTensorDesc(executor, i);
        ASSERT_NE(nullptr, tensorDesc);
        OH_NNTensorDesc_Destroy(&tensorDesc);
    }
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0300
 * @tc.desc: index equals the output count; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0300,
    Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);
    size_t outputCount = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount));
    ASSERT_EQ(nullptr, OH_NNExecutor_CreateOutputTensorDesc(executor, outputCount));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0100
 * @tc.desc: executor is null; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0100, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    size_t index = ZERO;
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims,
        &maxInputDims, &shapeLength));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0200
 * @tc.desc: Get the dynamic input dimension range with an out-of-range index; returns failure
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0200, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    size_t index = 6;  // beyond the model's input count
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;

    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims,
        &maxInputDims, &shapeLength));
    OH_NNExecutor_Destroy(&executor);
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0300
 * @tc.desc: Dynamic-shape model: set each input shape to the minimum of its dimension range; inference succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0300, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input tensorDesc shape to the minimum boundary of its dimension range
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> minInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            minInputDimsT.emplace_back(static_cast<int32_t>(minInputDims[j]));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0400
 * @tc.desc: Dynamic-shape model: set each input shape to the maximum of its dimension range; inference succeeds
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0400, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input tensorDesc shape to the maximum boundary of its dimension range
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> maxInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            maxInputDimsT.emplace_back(static_cast<int32_t>(maxInputDims[j]));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0500
 * @tc.desc: Dynamic-shape model: set each input shape below the minimum of its dimension range; inference fails
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0500, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input shape below the minimum boundary of its dimension range
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> minInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            minInputDimsT.emplace_back(static_cast<int32_t>(minInputDims[j] - 1));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0600
 * @tc.desc: Dynamic-shape model: set each input shape above the maximum of its dimension range; inference fails
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0600, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input shape above the maximum boundary of its dimension range
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> maxInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            maxInputDimsT.emplace_back(static_cast<int32_t>(maxInputDims[j] + 1));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
        outputTensors.data(), outputCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0700
 * @tc.desc: Dynamic-shape model: pass more input tensors than the model's input count; inference fails
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0700, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateDynamicExecutor(&executor);

    // Create the input and output tensor descriptors
    vector<NN_TensorDesc*> inputTensorDescs;
    vector<NN_TensorDesc*> outputTensorDescs;
    size_t inputCount = 0;
    size_t outputCount = 0;
    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);

    // Set each input tensorDesc shape to the maximum boundary of its dimension range
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
            &maxInputDims, &shapeLength));
        std::vector<int32_t> maxInputDimsT;
        for (size_t j = 0; j < shapeLength; ++j) {
            maxInputDimsT.emplace_back(static_cast<int32_t>(maxInputDims[j]));
        }
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength));
    }
    std::vector<int32_t> outputShape{1, 2, 2, 1};
    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
            outputShape.data(), outputShape.size()));
    }

    vector<NN_Tensor*> inputTensors;
    vector<NN_Tensor*> outputTensors;
    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount + 1,
        outputTensors.data(), outputCount));

    // Destroy the executor and release the tensor resources
    OH_NNExecutor_Destroy(&executor);
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
}

/**
 * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0800
 * @tc.desc: Called on a fixed-shape model executor; returns success
 * @tc.type: FUNC
 */
HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0800, Function | MediumTest | Level1)
{
    OH_NNExecutor* executor = nullptr;
    CreateExecutor(&executor);

    size_t index = ZERO;
    size_t *minInputDims = nullptr;
    size_t *maxInputDims = nullptr;
    size_t shapeLength = ZERO;
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims,
        &maxInputDims, &shapeLength));
    OH_NNExecutor_Destroy(&executor);
}
} // namespace OHOS::NeuralNetworkCore