/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>

#include "nnexecutor.h"
#include "nncompiler.h"
#include "nnbackend.h"
#include "device.h"
#include "prepared_model.h"
#include "neural_network_runtime/neural_network_runtime_type.h"
#include "utils.h"
#include "log.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class NNExecutorTest : public testing::Test {
public:
    NNExecutorTest() = default;
    ~NNExecutorTest() = default;

public:
    uint32_t m_index {0};
    const std::vector<int32_t> m_dim {3, 3};
    const std::vector<int32_t> m_dimOut {3, 3};
    const int32_t m_dimArry[2] {3, 3};
    uint32_t m_dimensionCount {2};
    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
};

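// Mock of the Device interface so tests can stub device queries and buffer allocation.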
class MockIDevice : public Device {
public:
    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        std::vector<bool>&));
    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&,
        bool&));
    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD1(AllocateBuffer, void*(size_t));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
};

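// Mock of PreparedModel; GetInputDimRanges is the main hook stubbed by the tests below.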
class MockIPreparedModel : public PreparedModel {
public:
    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
        const std::vector<IOTensor>&,
        std::vector<std::vector<int32_t>>&,
        std::vector<bool>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
        const std::vector<NN_Tensor*>&,
        std::vector<std::vector<int32_t>>&,
        std::vector<bool>&));
    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
    MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
        std::vector<std::vector<uint32_t>>&));
    MOCK_METHOD0(ReleaseBuiltModel, OH_NN_ReturnCode());
};

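// Mock of TensorDesc for tests that need a stubbed tensor descriptor.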
class MockTensorDesc : public TensorDesc {
public:
    MOCK_METHOD1(GetDataType, OH_NN_ReturnCode(OH_NN_DataType*));
    MOCK_METHOD1(SetDataType, OH_NN_ReturnCode(OH_NN_DataType));
    MOCK_METHOD1(GetFormat, OH_NN_ReturnCode(OH_NN_Format*));
    MOCK_METHOD1(SetFormat, OH_NN_ReturnCode(OH_NN_Format));
    MOCK_METHOD2(GetShape, OH_NN_ReturnCode(int32_t**, size_t*));
    MOCK_METHOD2(SetShape, OH_NN_ReturnCode(const int32_t*, size_t));
    MOCK_METHOD1(GetElementNum, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(GetByteSize, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(SetName, OH_NN_ReturnCode(const char*));
    MOCK_METHOD1(GetName, OH_NN_ReturnCode(const char**));
};

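// Helper that packs the given fields into an OH_NN_Tensor descriptor.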
OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
    const OH_NN_QuantParam *quantParam, OH_NN_TensorType type)
{
    OH_NN_Tensor tensor;
    tensor.dataType = dataType;
    tensor.dimensionCount = dimensionCount;
    tensor.dimensions = dimensions;
    tensor.quantParam = quantParam;
    tensor.type = type;

    return tensor;
}

/**
 * @tc.name: nnexecutortest_construct_001
 * @tc.desc: Verify that NNExecutor can be constructed and that CreateInputMemory and CreateOutputMemory succeed
 *           when the mocked device allocates tensor buffers.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0)
{
    LOGE("NNExecutor nnexecutortest_construct_001");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
    EXPECT_NE(nullptr, nnExecutor);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode retOutput = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retOutput);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
    OH_NN_ReturnCode retinput = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retinput);

    delete nnExecutor;

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_001
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_OPERATION_FORBIDDEN when the dimension-range query fails.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_002
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when minInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t max = 10;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, nullptr, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_003
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when maxInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t *minInputDims = &min;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, nullptr, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_004
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when shapeLength is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_005
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the prepared model reports
 *           empty dimension ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_006
 * @tc.desc: Verify that GetInputDimRange succeeds when the prepared model reports valid min and max ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_006, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_006");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // status code indicating success
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_007
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the min and max dimension
 *           ranges have mismatched sizes.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_007, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_007");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // status code indicating success
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_008
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when a min shape and its max shape
 *           have different lengths.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_008, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_008");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // status code indicating success
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getoutputshape_001
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when no output tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_001, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_002
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_002, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_003
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor description
 *           has no shape set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_003, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    pair1.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_004
 * @tc.desc: Verify that GetOutputShape succeeds when the output tensor description has a valid shape set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_004, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim2[2] = {3, 3};
    int32_t* ptr2 = expectDim2;
    int32_t** dimensions = &ptr2;
    uint32_t* shapeNum = &dimensionCount;
    *dimensions = nullptr;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}

/**
 * @tc.name: nnexecutortest_getinputnum_001
 * @tc.desc: Verify that GetInputNum returns 0 when no input tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputnum_001, TestSize.Level0)
{
    LOGE("GetInputNum nnexecutortest_getinputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetInputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputnum_001
 * @tc.desc: Verify that GetOutputNum returns 0 when no output tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputnum_001, TestSize.Level0)
{
    LOGE("GetOutputNum nnexecutortest_getoutputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetOutputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_001
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the index is out of range.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_002
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the stored input tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_003
 * @tc.desc: Verify that CreateInputTensorDesc returns a valid descriptor when the input tensor description
 *           is properly set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_001
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the index is out of range.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_002
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the stored output tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_003
 * @tc.desc: Verify that CreateOutputTensorDesc returns a valid descriptor when the output tensor description
 *           is properly set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

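// Run-done callback handed to SetOnRunDone in the test below.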
void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)
{
    LOGE("MyOnRunDone");
    // Handle the result here, for example:
    if (errCode != OH_NN_SUCCESS) {
        // Handle the error
        LOGE("Neural network execution failed with error code: %d", errCode);
    } else {
        // Process the successful result with outputTensor[] and outputCount;
        // outputTensor typically points at the memory holding the network outputs.
    }
    // If userData points to resources that need cleanup, release them here.
}

/**
 * @tc.name: nnexecutortest_setonrundone_001
 * @tc.desc: Verify that SetOnRunDone returns OH_NN_OPERATION_FORBIDDEN since the executor does not support
 *           run-done callbacks.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonrundone_001, TestSize.Level0)
{
    LOGE("SetOnRunDone nnexecutortest_setonrundone_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_ReturnCode ret = nnExecutor->SetOnRunDone(MyOnRunDone);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

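// Service-death callback handed to SetOnServiceDied in the test below.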
void MyOnServiceDied(void *userData)
{
    LOGE("MyOnServiceDied");
}

/**
 * @tc.name: nnexecutortest_setonservicedied_001
 * @tc.desc: Verify that SetOnServiceDied returns OH_NN_OPERATION_FORBIDDEN since the executor does not support
 *           service-death callbacks.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonservicedied_001, TestSize.Level0)
{
    LOGE("SetOnServiceDied nnexecutortest_setonservicedied_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_ReturnCode ret = nnExecutor->SetOnServiceDied(MyOnServiceDied);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_001
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the tensor arrays are nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_001, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t inputSize = 1;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_002
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when inputSize is 0.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_002, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t inputSize = 0;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_003
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the input dimension-range query
 *           is forbidden.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_003, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_OPERATION_FORBIDDEN; // simulate a forbidden dimension-range query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_runsync_004
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the number of reported dimension ranges
 *           does not match the number of inputs.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_004, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // status code indicating success
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_runsync_005
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the min and max dimension ranges
 *           have mismatched sizes.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_005, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // status code indicating success
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_runasync_001
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when called with null tensor arrays.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_001, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    void* buffer = m_dataArry;
    size_t inputSize = 1;
    size_t outputSize = 1;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_002
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when called with an inputSize of 0.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_002, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    void* buffer = m_dataArry;
    size_t inputSize = 0;
    size_t outputSize = 1;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_003
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when the input dimension-range query
 *           is forbidden.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_003, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Directly modify the passed-in reference parameters
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_OPERATION_FORBIDDEN; // simulate a forbidden dimension-range query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    void* buffer = m_dataArry;
    size_t inputSize = 2;
    size_t outputSize = 2;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(&tensor, inputSize, &tensor, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getbackendid_001
 * @tc.desc: Verify that GetBackendID returns the backend ID supplied at construction.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getbackendid_001, TestSize.Level0)
{
    LOGE("GetBackendID nnexecutortest_getbackendid_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetBackendID();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_setinput_001
 * @tc.desc: Verify that SetInput propagates OH_NN_FAILED when the input dimension-range query fails.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinput_002
 * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the stored input tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

1256 /**
1257 * @tc.name: nnexecutortest_setinput_003
1258 * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1.
1259 * @tc.type: FUNC
1260 */
1261 HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0)
1262 {
1263 LOGE("SetInput nnexecutortest_setinput_003");
1264 size_t m_backendID {0};
1265 std::shared_ptr<Device> m_device {nullptr};
1266 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1267 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1268 .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
1269 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1270 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1271
1272 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1273 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1281 m_inputTensorDescs.emplace_back(pair1);
1282 m_inputTensorDescs.emplace_back(pair2);
1283
1284 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1285 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1286
1287 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1288 void* buffer = m_dataArry;
1289 size_t length = 9 * sizeof(float);
1290
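    // Even with shaped descriptors, the forbidden dimension-range query should surface as an invalid parameter.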
1291 OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
1292 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1293
1294 testing::Mock::AllowLeak(mockIPreparedMode.get());
1295 }
1296
1297 /**
1298 * @tc.name: nnexecutortest_setinputfrommemory_001
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when the dimension-range query on the prepared model fails.
1300 * @tc.type: FUNC
1301 */
1302 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0)
1303 {
1304 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_001");
1305 size_t m_backendID {0};
1306 std::shared_ptr<Device> m_device {nullptr};
1307 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1308 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1309 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1310 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1311 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1312 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1313 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1314
1315 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1316 void* const data = m_dataArry;
1317 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1318
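    // With the dimension-range query mocked to fail, SetInputFromMemory is expected to return OH_NN_FAILED.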
1319 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1320 EXPECT_EQ(OH_NN_FAILED, ret);
1321
1322 testing::Mock::AllowLeak(mockIPreparedMode.get());
1323 }
1324
1325 /**
1326 * @tc.name: nnexecutortest_setinputfrommemory_002
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when the input descriptor pairs hold no TensorDesc and the dimension-range query fails.
1328 * @tc.type: FUNC
1329 */
1330 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0)
1331 {
1332 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_002");
1333 size_t m_backendID {0};
1334 std::shared_ptr<Device> m_device {nullptr};
1335 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1336 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1337 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1338 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1339 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1340
1341 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1342 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1343 m_inputTensorDescs.emplace_back(pair1);
1344 m_inputTensorDescs.emplace_back(pair2);
1345 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1346 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1347
1348 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1349 void* const data = m_dataArry;
1350 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1351
1352 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1353 EXPECT_EQ(OH_NN_FAILED, ret);
1354
1355 testing::Mock::AllowLeak(mockIPreparedMode.get());
1356 }
1357
1358 /**
1359 * @tc.name: nnexecutortest_setinputfrommemory_003
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED for shaped input descriptors when the dimension-range query fails.
1361 * @tc.type: FUNC
1362 */
1363 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0)
1364 {
1365 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_003");
1366 size_t m_backendID {0};
1367 std::shared_ptr<Device> m_device {nullptr};
1368 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1369 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1370 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1371 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1372 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1373
1374 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1375 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1376 std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
1377 int32_t expectDim[2] = {3, 3};
1378 int32_t* ptr = expectDim;
1379 uint32_t dimensionCount = 2;
1380 tensorDesr->SetShape(ptr, dimensionCount);
1381 pair1.first = tensorDesr;
1382 pair2.first = tensorDesr;
1383 m_inputTensorDescs.emplace_back(pair1);
1384 m_inputTensorDescs.emplace_back(pair2);
1385 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1386 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1387
1388 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1389 void* const data = m_dataArry;
1390 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1391
1392 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1393 EXPECT_EQ(OH_NN_FAILED, ret);
1394
1395 testing::Mock::AllowLeak(mockIPreparedMode.get());
1396 }
1397
1398 /**
1399 * @tc.name: nnexecutortest_setoutput_001
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor has no output tensor descriptors.
1401 * @tc.type: FUNC
1402 */
1403 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_001, TestSize.Level0)
1404 {
1405 LOGE("SetOutput nnexecutortest_setoutput_001");
1406 size_t m_backendID {0};
1407 std::shared_ptr<Device> m_device {nullptr};
1408 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1409 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1410 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1411 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1412 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1413
1414 size_t length = 9 * sizeof(float);
1415 void* buffer = m_dataArry;
1416
1417 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1418 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1419 }
1420
1421 /**
1422 * @tc.name: nnexecutortest_setoutput_002
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the output descriptor pairs hold no TensorDesc.
1424 * @tc.type: FUNC
1425 */
1426 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_002, TestSize.Level0)
1427 {
1428 LOGE("SetOutput nnexecutortest_setoutput_002");
1429 size_t m_backendID {0};
1430 std::shared_ptr<Device> m_device {nullptr};
1431 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1432 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1433 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1434
1435 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1436 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1437 m_outputTensorDescs.emplace_back(pair1);
1438 m_outputTensorDescs.emplace_back(pair2);
1439 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1440 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1441
1442 size_t length = 9 * sizeof(float);
1443 void* buffer = m_dataArry;
1444
1445 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1446 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1447 }
1448
1449 /**
1450 * @tc.name: nnexecutortest_setoutput_003
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor is constructed without a device.
1452 * @tc.type: FUNC
1453 */
1454 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_003, TestSize.Level0)
1455 {
1456 LOGE("SetOutput nnexecutortest_setoutput_003");
1457 size_t m_backendID {0};
1458 std::shared_ptr<Device> m_device {nullptr};
1459 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1460 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1461 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1462
1463 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1464 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1472 m_outputTensorDescs.emplace_back(pair1);
1473 m_outputTensorDescs.emplace_back(pair2);
1474 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1475 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1476
1477 size_t length = 9 * sizeof(float);
1478 void* buffer = m_dataArry;
1479
1480 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1481 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1482 }
1483
1484 /**
1485 * @tc.name: nnexecutortest_setoutputfrommemory_001
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor has no output tensor descriptors.
1487 * @tc.type: FUNC
1488 */
1489 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_001, TestSize.Level0)
1490 {
1491 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_001");
1492 size_t m_backendID {0};
1493 std::shared_ptr<Device> m_device {nullptr};
1494 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1495 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1496 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1497 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1498 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1499
1500 void* const data = m_dataArry;
1501 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1502
1503 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1504 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1505 }
1506
1507 /**
1508 * @tc.name: nnexecutortest_setoutputfrommemory_002
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the output descriptor pairs hold no TensorDesc.
1510 * @tc.type: FUNC
1511 */
1512 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_002, TestSize.Level0)
1513 {
1514 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_002");
1515 size_t m_backendID {0};
1516 std::shared_ptr<Device> m_device {nullptr};
1517 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1518 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1519 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1520
1521 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1522 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1523 m_outputTensorDescs.emplace_back(pair1);
1524 m_outputTensorDescs.emplace_back(pair2);
1525 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1526 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1527
1528 void* const data = m_dataArry;
1529 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1530
1531 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1532 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1533 }
1534
1535 /**
1536 * @tc.name: nnexecutortest_setoutputfrommemory_003
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor is constructed without a device.
1538 * @tc.type: FUNC
1539 */
1540 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_003, TestSize.Level0)
1541 {
1542 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_003");
1543 size_t m_backendID {0};
1544 std::shared_ptr<Device> m_device {nullptr};
1545 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1546 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1547 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1548
1549 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1550 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1558 m_outputTensorDescs.emplace_back(pair1);
1559 m_outputTensorDescs.emplace_back(pair2);
1560 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1561 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1562
1563 void* const data = m_dataArry;
1564 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1565
1566 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1567 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1568 }
1569
1570 /**
1571 * @tc.name: nnexecutortest_createinputmemory_001
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the executor has no input tensor descriptors.
1573 * @tc.type: FUNC
1574 */
1575 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_001, TestSize.Level0)
1576 {
1577 LOGE("CreateInputMemory nnexecutortest_createinputmemory_001");
1578 size_t m_backendID {0};
1579 std::shared_ptr<Device> m_device {nullptr};
1580 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1581 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1582 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1583 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1584 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1585
1586 OH_NN_Memory** memory = nullptr;
1587 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1588 void* const data = dataArry;
1589 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1590 OH_NN_Memory* ptr = &memoryPtr;
1591 memory = &ptr;
1592 size_t length = 9 * sizeof(float);
1593
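    // No input descriptors were registered, so index 0 should be rejected as an invalid parameter.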
1594 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1595 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1596 }
1597
1598 /**
1599 * @tc.name: nnexecutortest_createinputmemory_002
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the input descriptor pairs hold no TensorDesc.
1601 * @tc.type: FUNC
1602 */
1603 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_002, TestSize.Level0)
1604 {
1605 LOGE("CreateInputMemory nnexecutortest_createinputmemory_002");
1606 size_t m_backendID {0};
1607 std::shared_ptr<Device> m_device {nullptr};
1608 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1609 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1610 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1611
1612 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1613 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1614 m_inputTensorDescs.emplace_back(pair1);
1615 m_inputTensorDescs.emplace_back(pair2);
1616 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1617 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1618
1619 OH_NN_Memory** memory = nullptr;
1620 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1621 void* const data = dataArry;
1622 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1623 OH_NN_Memory* ptr = &memoryPtr;
1624 memory = &ptr;
1625 size_t length = 9 * sizeof(float);
1626
1627 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1628 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1629 }
1630
1631 /**
1632 * @tc.name: nnexecutortest_createinputmemory_003
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor buffer.
1634 * @tc.type: FUNC
1635 */
1636 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
1637 {
1638 LOGE("CreateInputMemory nnexecutortest_createinputmemory_003");
1639 size_t m_backendID {0};
1640 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1641
1642 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1643 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1644 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1645
1646 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1647 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1655 m_inputTensorDescs.emplace_back(pair1);
1656 m_inputTensorDescs.emplace_back(pair2);
1657
1658 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1659 size_t length = 9 * sizeof(float);
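    // Simulate an allocation failure: the device returns a null tensor buffer.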
1660 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1661 .WillRepeatedly(::testing::Return(nullptr));
1662
1663 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1664 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1665
1666 OH_NN_Memory** memory = nullptr;
1667 void* const data = dataArry;
1668 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1669 OH_NN_Memory* mPtr = &memoryPtr;
1670 memory = &mPtr;
1671
1672 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1673 EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
1674
1675 testing::Mock::AllowLeak(device.get());
1676 }
1677
1678 /**
1679 * @tc.name: nnexecutortest_createinputmemory_004
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
1681 * @tc.type: FUNC
1682 */
1683 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
1684 {
1685 LOGE("CreateInputMemory nnexecutortest_createinputmemory_004");
1686 size_t m_backendID {0};
1687 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1688
1689 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1690 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1691 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1692
1693 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1694 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1702 m_inputTensorDescs.emplace_back(pair1);
1703 m_inputTensorDescs.emplace_back(pair2);
1704
1705 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1706 size_t length = 9 * sizeof(float);
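    // Simulate a successful allocation with a dummy non-null address (never dereferenced by the test).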
1707 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1708 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1709
1710 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1711 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1712
1713 OH_NN_Memory** memory = nullptr;
1714 void* const data = dataArry;
1715 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1716 OH_NN_Memory* mPtr = &memoryPtr;
1717 memory = &mPtr;
1718
1719 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1720 EXPECT_EQ(OH_NN_SUCCESS, ret);
1721
1722 testing::Mock::AllowLeak(device.get());
1723 }
1724
1725 /**
1726 * @tc.name: nnexecutortest_destroyinputmemory_001
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when the executor has no input tensor descriptors.
1728 * @tc.type: FUNC
1729 */
1730 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_001, TestSize.Level0)
1731 {
1732 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_001");
1733 size_t m_backendID {0};
1734 std::shared_ptr<Device> m_device {nullptr};
1735 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1736 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1737 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1738 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1739 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1740
1741 size_t length = 9 * sizeof(float);
1742 OH_NN_Memory** memory = nullptr;
1743 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1744 void* const data = dataArry;
1745 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1746 OH_NN_Memory* ptr = &memoryPtr;
1747 memory = &ptr;
1748
1749 nnExecutor->CreateInputMemory(m_index, length, memory);
1750 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1751 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1752 }
1753
1754 /**
1755 * @tc.name: nnexecutortest_destroyinputmemory_002
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when the input descriptor pairs hold no TensorDesc.
1757 * @tc.type: FUNC
1758 */
1759 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_002, TestSize.Level0)
1760 {
1761 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_002");
1762 size_t m_backendID {0};
1763 std::shared_ptr<Device> m_device {nullptr};
1764 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1765 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1766 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1767
1768 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1769 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1770 m_inputTensorDescs.emplace_back(pair1);
1771 m_inputTensorDescs.emplace_back(pair2);
1772 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1773 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1774
1775 size_t length = 9 * sizeof(float);
1776 OH_NN_Memory** memory = nullptr;
1777 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1778 void* const data = dataArry;
1779 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1780 OH_NN_Memory* ptr = &memoryPtr;
1781 memory = &ptr;
1782
1783 nnExecutor->CreateInputMemory(m_index, length, memory);
1784 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1785 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1786 }
1787
1788 /**
1789 * @tc.name: nnexecutortest_destroyinputmemory_003
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_SUCCESS when releasing memory previously created by CreateInputMemory.
1791 * @tc.type: FUNC
1792 */
1793 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0)
1794 {
1795 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_003");
1796 size_t m_backendID {0};
1797 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1798 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1799 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1800 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1801
1802 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1803 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1811 m_inputTensorDescs.emplace_back(pair1);
1812 m_inputTensorDescs.emplace_back(pair2);
1813
1814 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1815 size_t length = 9 * sizeof(float);
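    // Allocation succeeds, so the memory created below can be destroyed afterwards.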
1816 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1817 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1818 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1819 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1820
1821 OH_NN_Memory** memory = nullptr;
1822 void* const data = dataArry;
1823 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1824 OH_NN_Memory* mPtr = &memoryPtr;
1825 memory = &mPtr;
1826
1827 nnExecutor->CreateInputMemory(m_index, length, memory);
1828 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1829 EXPECT_EQ(OH_NN_SUCCESS, ret);
1830
1831 testing::Mock::AllowLeak(device.get());
1832 }
1833
1834 /**
1835 * @tc.name: nnexecutortest_createoutputmemory_001
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when the executor has no output tensor descriptors.
1837 * @tc.type: FUNC
1838 */
1839 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_001, TestSize.Level0)
1840 {
1841 LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_001");
1842 size_t m_backendID {0};
1843 std::shared_ptr<Device> m_device {nullptr};
1844 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1845 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1846 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1847 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1848 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1849
1850 size_t length = 9 * sizeof(float);
1851 OH_NN_Memory** memory = nullptr;
1852 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1853 void* const data = dataArry;
1854 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1855 OH_NN_Memory* ptr = &memoryPtr;
1856 memory = &ptr;
1857
1858 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1859 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1860 }
1861
1862 /**
1863 * @tc.name: nnexecutortest_createoutputmemory_002
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when the output descriptor pairs hold no TensorDesc.
1865 * @tc.type: FUNC
1866 */
1867 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_002, TestSize.Level0)
1868 {
1869 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_002");
1870 size_t m_backendID {0};
1871 std::shared_ptr<Device> m_device {nullptr};
1872 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1873 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1874 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1875
1876 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1877 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1878 m_outputTensorDescs.emplace_back(pair1);
1879 m_outputTensorDescs.emplace_back(pair2);
1880 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1881 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1882
1883 OH_NN_Memory** memory = nullptr;
1884 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1885 void* const data = dataArry;
1886 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1887 OH_NN_Memory* ptr = &memoryPtr;
1888 memory = &ptr;
1889 size_t length = 9 * sizeof(float);
1890
1891 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1892 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1893 }
1894
1895 /**
1896 * @tc.name: nnexecutortest_createoutputmemory_003
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor buffer.
1898 * @tc.type: FUNC
1899 */
1900 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
1901 {
1902 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_003");
1903 size_t m_backendID {0};
1904 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1905
1906 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1907 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1908 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1909
1910 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1911 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1919 m_outputTensorDescs.emplace_back(pair1);
1920 m_outputTensorDescs.emplace_back(pair2);
1921
1922 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1923 size_t length = 9 * sizeof(float);
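    // Simulate an allocation failure: the device returns a null output buffer.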
1924 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
1925 .WillRepeatedly(::testing::Return(nullptr));
1926
1927 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1928 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1929
1930 OH_NN_Memory** memory = nullptr;
1931 void* const data = dataArry;
1932 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1933 OH_NN_Memory* mPtr = &memoryPtr;
1934 memory = &mPtr;
1935
1936 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1937 EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
1938
1939 testing::Mock::AllowLeak(device.get());
1940 }
1941
1942 /**
1943 * @tc.name: nnexecutortest_createoutputmemory_004
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
1945 * @tc.type: FUNC
1946 */
1947 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
1948 {
1949 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_004");
1950 size_t m_backendID {0};
1951 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1952
1953 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1954 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1955 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1956
1957 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1958 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
1966 m_outputTensorDescs.emplace_back(pair1);
1967 m_outputTensorDescs.emplace_back(pair2);
1968
1969 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1970 size_t length = 9 * sizeof(float);
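    // Simulate a successful output-buffer allocation with a dummy non-null address.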
1971 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
1972 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1973
1974 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1975 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1976
1977 OH_NN_Memory** memory = nullptr;
1978 void* const data = dataArry;
1979 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1980 OH_NN_Memory* mPtr = &memoryPtr;
1981 memory = &mPtr;
1982
1983 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1984 EXPECT_EQ(OH_NN_SUCCESS, ret);
1985
1986 testing::Mock::AllowLeak(device.get());
1987 }
1988
1989 /**
1990 * @tc.name: nnexecutortest_destroyoutputmemory_001
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when the executor has no output tensor descriptors.
1992 * @tc.type: FUNC
1993 */
1994 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_001, TestSize.Level0)
1995 {
1996 LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_001");
1997 size_t m_backendID {0};
1998 std::shared_ptr<Device> m_device {nullptr};
1999 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
2000 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2001 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2002 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2003 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2004
2005 size_t length = 9 * sizeof(float);
2006 OH_NN_Memory** memory = nullptr;
2007 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2008 void* const data = dataArry;
2009 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2010 OH_NN_Memory* ptr = &memoryPtr;
2011 memory = &ptr;
2012
2013 nnExecutor->CreateOutputMemory(m_index, length, memory);
2014 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2015 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2016 }
2017
2018 /**
2019 * @tc.name: nnexecutortest_destroyoutputmemory_002
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when the output descriptor pairs hold no TensorDesc.
2021 * @tc.type: FUNC
2022 */
2023 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_002, TestSize.Level0)
2024 {
2025 LOGE("DestroyInputMemory nnexecutortest_destroyoutputmemory_002");
2026 size_t m_backendID {0};
2027 std::shared_ptr<Device> m_device {nullptr};
2028 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
2029 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2030 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2031
2032 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2033 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2034 m_outputTensorDescs.emplace_back(pair1);
2035 m_outputTensorDescs.emplace_back(pair2);
2036 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2037 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2038
2039 size_t length = 9 * sizeof(float);
2040 OH_NN_Memory** memory = nullptr;
2041 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2042 void* const data = dataArry;
2043 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2044 OH_NN_Memory* ptr = &memoryPtr;
2045 memory = &ptr;
2046
2047 nnExecutor->CreateOutputMemory(m_index, length, memory);
2048 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2049 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2050 }
2051
2052 /**
2053 * @tc.name: nnexecutortest_destroyoutputmemory_003
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_SUCCESS when releasing memory previously created by CreateOutputMemory.
2055 * @tc.type: FUNC
2056 */
2057 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_003, TestSize.Level0)
2058 {
2059 LOGE("DestroyInputMemory nnexecutortest_destroyoutputmemory_003");
2060 size_t m_backendID {0};
2061 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
2062 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
2063 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2064 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2065
2066 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2067 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesc->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesc;
    pair2.first = tensorDesc;
2075 m_outputTensorDescs.emplace_back(pair1);
2076 m_outputTensorDescs.emplace_back(pair2);
2077
2078 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2079 size_t length = 9 * sizeof(float);
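    // Allocation succeeds so that DestroyOutputMemory has a created buffer to release.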
2080 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
2081 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
2082 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2083 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2084
2085 OH_NN_Memory** memory = nullptr;
2086 void* const data = dataArry;
2087 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2088 OH_NN_Memory* mPtr = &memoryPtr;
2089 memory = &mPtr;
2090
2091 nnExecutor->CreateOutputMemory(m_index, length, memory);
2092 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2093 EXPECT_EQ(OH_NN_SUCCESS, ret);
2094
2095 testing::Mock::AllowLeak(device.get());
2096 }
2097
2098 /**
2099 * @tc.name: nnexecutortest_run_001
 * @tc.desc: Verify that Run returns OH_NN_SUCCESS when the executor holds no input or output tensor descriptors.
2101 * @tc.type: FUNC
2102 */
2103 HWTEST_F(NNExecutorTest, nnexecutortest_run_001, TestSize.Level0)
2104 {
2105 LOGE("Run nnexecutortest_run_001");
2106 size_t m_backendID {0};
2107 std::shared_ptr<Device> m_device {nullptr};
2108 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2109 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2110 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2111 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2112 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2113 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2114 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2115
2116 size_t length = 9 * sizeof(float);
2117 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2118 void* buffer = m_dataArry;
2119
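    // SetInput/SetOutput results are intentionally ignored; the test only asserts that Run returns OH_NN_SUCCESS.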
2120 nnExecutor->SetInput(m_index, tensor, buffer, length);
2121 nnExecutor->SetOutput(m_index, buffer, length);
2122 OH_NN_ReturnCode ret = nnExecutor->Run();
2123 EXPECT_EQ(OH_NN_SUCCESS, ret);
2124
2125 testing::Mock::AllowLeak(mockIPreparedMode.get());
2126 }
2127
2128 /**
2129 * @tc.name: nnexecutortest_run_002
 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when the input descriptor pairs hold no TensorDesc.
2131 * @tc.type: FUNC
2132 */
2133 HWTEST_F(NNExecutorTest, nnexecutortest_run_002, TestSize.Level0)
2134 {
2135 LOGE("Run nnexecutortest_run_002");
2136 size_t m_backendID {0};
2137 std::shared_ptr<Device> m_device {nullptr};
2138 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2139 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2140 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2141 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2142 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2143
2144 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2145 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2146 m_inputTensorDescs.emplace_back(pair1);
2147 m_inputTensorDescs.emplace_back(pair2);
2148 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2149 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2150
2151 size_t length = 9 * sizeof(float);
2152 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2153 void* buffer = m_dataArry;
2154
2155 nnExecutor->SetInput(m_index, tensor, buffer, length);
2156 nnExecutor->SetOutput(m_index, buffer, length);
2157 OH_NN_ReturnCode ret = nnExecutor->Run();
2158 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2159
2160 testing::Mock::AllowLeak(mockIPreparedMode.get());
2161 }
2162
2163 /**
2164 * @tc.name: nnexecutortest_run_003
 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when both input and output descriptor pairs hold no TensorDesc.
2166 * @tc.type: FUNC
2167 */
2168 HWTEST_F(NNExecutorTest, nnexecutortest_run_003, TestSize.Level0)
2169 {
2170 LOGE("Run nnexecutortest_run_003");
2171 size_t m_backendID {0};
2172 std::shared_ptr<Device> m_device {nullptr};
2173 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2174 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2175 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2176 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2177 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2178
2179 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2180 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2181 m_inputTensorDescs.emplace_back(pair1);
2182 m_inputTensorDescs.emplace_back(pair2);
2183 m_outputTensorDescs.emplace_back(pair1);
2184 m_outputTensorDescs.emplace_back(pair2);
2185 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2186 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2187
2188 size_t length = 9 * sizeof(float);
2189 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2190 void* buffer = m_dataArry;
2191
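    // Placeholder descriptor pairs without TensorDesc objects should make Run fail its parameter checks.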
2192 nnExecutor->SetInput(m_index, tensor, buffer, length);
2193 nnExecutor->SetOutput(m_index, buffer, length);
2194 OH_NN_ReturnCode ret = nnExecutor->Run();
2195 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2196
2197 testing::Mock::AllowLeak(mockIPreparedMode.get());
2198 }
2199
2200 /**
2201 * @tc.name: nnexecutortest_setextensionconfig_001
 * @tc.desc: Verify that SetExtensionConfig succeeds and that GetExecutorConfig then returns a non-null configuration.
2203 * @tc.type: FUNC
2204 */
2205 HWTEST_F(NNExecutorTest, nnexecutortest_setextensionconfig_001, TestSize.Level0)
2206 {
2207 LOGE("SetExtensionConfig nnexecutortest_setextensionconfig_001");
2208 size_t m_backendID {0};
2209 std::shared_ptr<Device> m_device {nullptr};
2210 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2211 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2212 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2213 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2214 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2215
2216 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2217 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2218 m_inputTensorDescs.emplace_back(pair1);
2219 m_inputTensorDescs.emplace_back(pair2);
2220 m_outputTensorDescs.emplace_back(pair1);
2221 m_outputTensorDescs.emplace_back(pair2);
2222 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2223 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2224
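    // Populate the extension-config map with placeholder byte vectors for each key the executor recognizes.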
2225 std::unordered_map<std::string, std::vector<char>> configMap;
2226 std::string callingPidStr = "callingPid";
2227 std::vector<char> vecCallingPid(callingPidStr.begin(), callingPidStr.end());
2228 configMap["callingPid"] = vecCallingPid;
2229
2230 std::string hiaiModelIdStr = "hiaiModelId";
2231 std::vector<char> vechiaiModelId(hiaiModelIdStr.begin(), hiaiModelIdStr.end());
2232 configMap["hiaiModelId"] = vechiaiModelId;
2233
2234 std::string vecNeedLatencyStr = "isNeedModelLatency";
2235 std::vector<char> vecNeedLatency(vecNeedLatencyStr.begin(), vecNeedLatencyStr.end());
2236 configMap["isNeedModelLatency"] = vecNeedLatency;
2237 OH_NN_ReturnCode retSetExtensionConfig = nnExecutor->SetExtensionConfig(configMap);
2238 EXPECT_EQ(OH_NN_SUCCESS, retSetExtensionConfig);
2239
2240 ExecutorConfig* retGetExecutorConfig = nnExecutor->GetExecutorConfig();
2241 EXPECT_NE(nullptr, retGetExecutorConfig);
2242
2243 testing::Mock::AllowLeak(mockIPreparedMode.get());
2244 }
2245 } // namespace UnitTest
2246 } // namespace NeuralNetworkRuntime
2247 } // namespace OHOS