/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>

#include "nnexecutor.h"
#include "nncompiler.h"
#include "nnbackend.h"
#include "device.h"
#include "prepared_model.h"
#include "neural_network_runtime/neural_network_runtime_type.h"
#include "utils.h"
#include "log.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class NNExecutorTest : public testing::Test {
public:
    NNExecutorTest() = default;
    ~NNExecutorTest() = default;

public:
    uint32_t m_index {0};
    const std::vector<int32_t> m_dim {3, 3};
    const std::vector<int32_t> m_dimOut {3, 3};
    const int32_t m_dimArry[2] {3, 3};
    uint32_t m_dimensionCount {2};
    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
};

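// Mock of the Device interface. Each test stubs only the methods it
// exercises (for example AllocateTensorBuffer); the remaining methods keep
// gmock's default actions.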
class MockIDevice : public Device {
public:
    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        std::vector<bool>&));
    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
                                          const ModelConfig&,
                                          std::shared_ptr<PreparedModel>&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
                                          const ModelConfig&,
                                          std::shared_ptr<PreparedModel>&));
    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
                                                              const ModelConfig&,
                                                              std::shared_ptr<PreparedModel>&,
                                                              bool&));
    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
                                                 const ModelConfig&,
                                                 std::shared_ptr<PreparedModel>&));
    MOCK_METHOD1(AllocateBuffer, void*(size_t));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
    MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&));
};

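// Mock of the PreparedModel interface, used below to script GetInputDimRanges
// and the Run overloads.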
class MockIPreparedModel : public PreparedModel {
public:
    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
                                 const std::vector<IOTensor>&,
                                 std::vector<std::vector<int32_t>>&,
                                 std::vector<bool>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
                                 const std::vector<NN_Tensor*>&,
                                 std::vector<std::vector<int32_t>>&,
                                 std::vector<bool>&));
    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
    MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
                                               std::vector<std::vector<uint32_t>>&));
    MOCK_METHOD0(ReleaseBuiltModel, OH_NN_ReturnCode());
};

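// Mock of the TensorDesc accessors. The tests in this section construct real
// TensorDesc objects directly; the mock is available where an accessor needs
// to be scripted.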
class MockTensorDesc : public TensorDesc {
public:
    MOCK_METHOD1(GetDataType, OH_NN_ReturnCode(OH_NN_DataType*));
    MOCK_METHOD1(SetDataType, OH_NN_ReturnCode(OH_NN_DataType));
    MOCK_METHOD1(GetFormat, OH_NN_ReturnCode(OH_NN_Format*));
    MOCK_METHOD1(SetFormat, OH_NN_ReturnCode(OH_NN_Format));
    MOCK_METHOD2(GetShape, OH_NN_ReturnCode(int32_t**, size_t*));
    MOCK_METHOD2(SetShape, OH_NN_ReturnCode(const int32_t*, size_t));
    MOCK_METHOD1(GetElementNum, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(GetByteSize, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(SetName, OH_NN_ReturnCode(const char*));
    MOCK_METHOD1(GetName, OH_NN_ReturnCode(const char**));
};

OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
    const OH_NN_QuantParam *quantParam, OH_NN_TensorType type)
{
    OH_NN_Tensor tensor;
    tensor.dataType = dataType;
    tensor.dimensionCount = dimensionCount;
    tensor.dimensions = dimensions;
    tensor.quantParam = quantParam;
    tensor.type = type;

    return tensor;
}
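
// Illustrative usage (a sketch, not exercised by this suite): describe a
// 3 x 3 float32 tensor with no quantization parameters.
//     int32_t dims[2] = {3, 3};
//     OH_NN_Tensor t = SetTensor(OH_NN_FLOAT32, 2, dims, nullptr, OH_NN_TENSOR);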

/**
 * @tc.name: nnexecutortest_construct_001
 * @tc.desc: Verify that the NNExecutor constructor returns a valid executor and that CreateInputMemory and CreateOutputMemory succeed.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0)
{
    LOGE("NNExecutor nnexecutortest_construct_001");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);
    EXPECT_NE(nullptr, nnExecutor);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode retOutput = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retOutput);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
    OH_NN_ReturnCode retinput = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retinput);

    delete nnExecutor;

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_001
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_OPERATION_FORBIDDEN when the prepared model fails to report its dimension ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_002
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when minInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t max = 10;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, nullptr, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_003
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when maxInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t *minInputDims = &min;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, nullptr, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_004
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when shapeLength is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_005
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the prepared model reports success but leaves the dimension ranges empty.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_006
 * @tc.desc: Verify that GetInputDimRange succeeds when the prepared model reports consistent dimension ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_006, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_006");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_SUCCESS; // report success
            }));
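    // The Invoke action above is the stubbing pattern used for every
    // dimension-range test in this file: the lambda copies the captured
    // ranges into the reference parameters that NNExecutor passes to
    // GetInputDimRanges.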
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_007
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the reported min and max dimension ranges differ in count.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_007, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_007");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_SUCCESS; // report success
            }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_008
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when a reported min range and its max range differ in length.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_008, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_008");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_SUCCESS; // report success
            }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getoutputshape_001
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the executor holds no output tensor descriptions.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_001, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_002
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor descriptions are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_002, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_003
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor description has no shape set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_003, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    pair1.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_004
 * @tc.desc: Verify that GetOutputShape returns OH_NN_SUCCESS and reports the stored shape when the output tensor descriptions are fully specified.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_004, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    int32_t expectDim2[2] = {3, 3};
    int32_t* ptr2 = expectDim2;
    int32_t** dimensions = &ptr2;
    uint32_t* shapeNum = &dimensionCount;
    *dimensions = nullptr;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}

/**
 * @tc.name: nnexecutortest_getinputnum_001
 * @tc.desc: Verify that GetInputNum returns 0 for an executor constructed without input tensor descriptions.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputnum_001, TestSize.Level0)
{
    LOGE("GetInputNum nnexecutortest_getinputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t ret = nnExecutor->GetInputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputnum_001
 * @tc.desc: Verify that GetOutputNum returns 0 for an executor constructed without output tensor descriptions.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputnum_001, TestSize.Level0)
{
    LOGE("GetOutputNum nnexecutortest_getoutputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t ret = nnExecutor->GetOutputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_001
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the index is out of range.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_002
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the indexed input tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_003
 * @tc.desc: Verify that CreateInputTensorDesc returns a valid NN_TensorDesc for a valid index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 0;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_001
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the index is out of range.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_002
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the indexed output tensor description is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_003
 * @tc.desc: Verify that CreateOutputTensorDesc returns a valid NN_TensorDesc for a valid index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)
{
    LOGE("MyOnRunDone");
    // Handle the result here, for example:
    if (errCode != OH_NN_SUCCESS) {
        // Handle the failure
        LOGE("Neural network execution failed with error code: %d", errCode);
    } else {
        // Process the successful result via outputTensor[] and outputCount;
        // outputTensor typically points at the memory holding the network's output data
    }
    // If userData points to resources that need cleanup, release them here
}
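
// A minimal sketch of how the callback above would be registered, assuming an
// executor that accepted it (the test below shows NNExecutor rejects the
// registration with OH_NN_OPERATION_FORBIDDEN):
//     OH_NN_ReturnCode code = nnExecutor->SetOnRunDone(MyOnRunDone);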

/**
 * @tc.name: nnexecutortest_setonrundone_001
 * @tc.desc: Verify that SetOnRunDone returns OH_NN_OPERATION_FORBIDDEN because NNExecutor does not support run-done callbacks.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonrundone_001, TestSize.Level0)
{
    LOGE("SetOnRunDone nnexecutortest_setonrundone_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_ReturnCode ret = nnExecutor->SetOnRunDone(MyOnRunDone);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

void MyOnServiceDied(void *userData)
{
    LOGE("MyOnServiceDied");
}

/**
 * @tc.name: nnexecutortest_setonservicedied_001
 * @tc.desc: Verify that SetOnServiceDied returns OH_NN_OPERATION_FORBIDDEN because NNExecutor does not support service-died callbacks.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonservicedied_001, TestSize.Level0)
{
    LOGE("SetOnServiceDied nnexecutortest_setonservicedied_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_ReturnCode ret = nnExecutor->SetOnServiceDied(MyOnServiceDied);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_001
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the tensor arrays are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_001, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t inputSize = 1;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_002
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when inputSize is 0.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_002, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t inputSize = 0;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_003
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when querying the input dimension ranges fails.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_003, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_OPERATION_FORBIDDEN; // simulate a failed range query
            }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_runsync_004
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the reported dimension ranges do not cover every input tensor.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_004, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_SUCCESS; // report success
            }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_runsync_005
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the reported min and max dimension ranges differ in count.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_005, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Fill the output parameters passed in by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_SUCCESS; // report success
            }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}
1204 
/**
 * @tc.name: nnexecutortest_runasync_001
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when the executor is constructed without a
 *           prepared model.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_001, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    void* buffer = m_dataArry;
    size_t inputSize = 1;
    size_t outputSize = 1;
    int32_t timeout = 10;
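    // With no prepared model attached, the executor is expected to reject the request outright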
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_002
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN for a zero input size when the executor has no
 *           prepared model.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_002, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    void* buffer = m_dataArry;
    size_t inputSize = 0;
    size_t outputSize = 1;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_003
 * @tc.desc: Verify that RunAsync propagates OH_NN_OPERATION_FORBIDDEN reported by GetInputDimRanges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_003, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
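    // Unlike the RunSync cases above, the ranges here are well formed; the failure is injected through the
    // mock's return code, which RunAsync is expected to pass straight back to the caller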
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
                // Write the prepared ranges into the output parameters passed by reference
                minInputDims = minDims;
                maxInputDims = maxDims;
                return OH_NN_OPERATION_FORBIDDEN; // Inject the forbidden status code
            }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    void* buffer = m_dataArry;
    size_t inputSize = 2;
    size_t outputSize = 2;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(&tensor, inputSize, &tensor, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getbackendid_001
 * @tc.desc: Verify that GetBackendID returns the backend ID the executor was constructed with.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getbackendid_001, TestSize.Level0)
{
    LOGE("GetBackendID nnexecutortest_getbackendid_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t ret = nnExecutor->GetBackendID();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_setinput_001
 * @tc.desc: Verify that SetInput returns OH_NN_FAILED when GetInputDimRanges fails.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

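    // A 3 x 3 FLOAT32 tensor over the fixture's nine-element buffer; the failing mock should make SetInput
    // return OH_NN_FAILED before the buffer is ever used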
    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinput_002
 * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the stored input tensor descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinput_003
 * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when GetInputDimRanges is forbidden for a
 *           populated input descriptor.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinputfrommemory_001
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0)
{
    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinputfrommemory_002
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails and the stored input
 *           tensor descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0)
{
    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinputfrommemory_003
 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails even with populated
 *           input tensor descriptors.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0)
{
    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setoutput_001
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when no output tensor descriptors exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_001, TestSize.Level0)
{
    LOGE("SetOutput nnexecutortest_setoutput_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    void* buffer = m_dataArry;

    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_setoutput_002
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the stored output tensor descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_002, TestSize.Level0)
{
    LOGE("SetOutput nnexecutortest_setoutput_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    void* buffer = m_dataArry;

    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_setoutput_003
 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor was constructed without a device.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_003, TestSize.Level0)
{
    LOGE("SetOutput nnexecutortest_setoutput_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    void* buffer = m_dataArry;

    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_setoutputfrommemory_001
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when no output tensor descriptors exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_001, TestSize.Level0)
{
    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_setoutputfrommemory_002
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the stored output tensor
 *           descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_002, TestSize.Level0)
{
    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_setoutputfrommemory_003
 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor was constructed
 *           without a device.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_003, TestSize.Level0)
{
    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    void* const data = m_dataArry;
    OH_NN_Memory memory = {data, 9 * sizeof(float)};

    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_createinputmemory_001
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when no input tensor descriptors exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_001, TestSize.Level0)
{
    LOGE("CreateInputMemory nnexecutortest_createinputmemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_createinputmemory_002
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the stored input tensor descriptors
 *           are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_002, TestSize.Level0)
{
    LOGE("CreateInputMemory nnexecutortest_createinputmemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_createinputmemory_003
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor
 *           buffer.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
{
    LOGE("CreateInputMemory nnexecutortest_createinputmemory_003");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
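    // A null buffer from the device's AllocateTensorBuffer should surface as OH_NN_MEMORY_ERROR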
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(nullptr));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_createinputmemory_004
 * @tc.desc: Verify that CreateInputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
{
    LOGE("CreateInputMemory nnexecutortest_createinputmemory_004");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
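    // Returning a dummy non-null address makes the allocation appear to succeed; the buffer is never dereferenced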
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_destroyinputmemory_001
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when no input memory was created.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_001, TestSize.Level0)
{
    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;

    nnExecutor->CreateInputMemory(m_index, length, memory);
    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_destroyinputmemory_002
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when the stored input tensor descriptors
 *           are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_002, TestSize.Level0)
{
    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;

    nnExecutor->CreateInputMemory(m_index, length, memory);
    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_destroyinputmemory_003
 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_SUCCESS for memory previously created through
 *           CreateInputMemory.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0)
{
    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_003");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
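    // With allocation mocked to succeed, the Create/Destroy pair is expected to round-trip cleanly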
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    nnExecutor->CreateInputMemory(m_index, length, memory);
    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_createoutputmemory_001
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when no output tensor descriptors exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_001, TestSize.Level0)
{
    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;

    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputmemory_002
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when the stored output tensor
 *           descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_002, TestSize.Level0)
{
    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputmemory_003
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor
 *           buffer.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
{
    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_003");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
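    // A null buffer from the device's AllocateTensorBuffer should surface as OH_NN_MEMORY_ERROR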
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(nullptr));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_createoutputmemory_004
 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
{
    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_004");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
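    // Returning a dummy non-null address makes the allocation appear to succeed; the buffer is never dereferenced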
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_destroyoutputmemory_001
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when no output memory was created.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_001, TestSize.Level0)
{
    LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;

    nnExecutor->CreateOutputMemory(m_index, length, memory);
    OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_destroyoutputmemory_002
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when the stored output tensor
 *           descriptors are null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_002, TestSize.Level0)
{
    LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Memory** memory = nullptr;
    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* ptr = &memoryPtr;
    memory = &ptr;

    nnExecutor->CreateOutputMemory(m_index, length, memory);
    OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_destroyoutputmemory_003
 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_SUCCESS for memory previously created through
 *           CreateOutputMemory.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_003, TestSize.Level0)
{
    LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_003");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
2361     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
2362         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
2363     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2364         m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
2365         false, performance, priority);
2366 
2367     OH_NN_Memory** memory = nullptr;
2368     void* const data = dataArry;
2369     OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2370     OH_NN_Memory* mPtr = &memoryPtr;
2371     memory = &mPtr;
2372 
2373     nnExecutor->CreateOutputMemory(m_index, length, memory);
2374     OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2375     EXPECT_EQ(OH_NN_SUCCESS, ret);
2376 
2377     testing::Mock::AllowLeak(device.get());
2378 }
2379 
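/*
 * A minimal sketch of the Create/Destroy pairing the three cases above
 * exercise by hand. It is an assumption-free condensation of the calls
 * already used in this file (CreateOutputMemory / DestroyOutputMemory);
 * the helper itself is illustrative only and is not called by any test.
 */
static OH_NN_ReturnCode CreateThenDestroyOutputMemory(NNExecutor* executor, uint32_t index, size_t length)
{
    if (executor == nullptr) {
        return OH_NN_INVALID_PARAMETER;
    }
    // On success, CreateOutputMemory points 'memory' at a device-backed OH_NN_Memory.
    OH_NN_Memory* memory = nullptr;
    OH_NN_ReturnCode ret = executor->CreateOutputMemory(index, length, &memory);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }
    // DestroyOutputMemory releases the buffer the executor allocated above.
    return executor->DestroyOutputMemory(index, &memory);
}
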
/**
 * @tc.name: nnexecutortest_run_001
 * @tc.desc: Verify that Run returns OH_NN_SUCCESS when input and output buffers are set and no tensor descriptions are registered.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_run_001, TestSize.Level0)
{
    LOGE("Run nnexecutortest_run_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedModel = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedModel.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;

    nnExecutor->SetInput(m_index, tensor, buffer, length);
    nnExecutor->SetOutput(m_index, buffer, length);
    OH_NN_ReturnCode ret = nnExecutor->Run();
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(mockIPreparedModel.get());
}

/**
 * @tc.name: nnexecutortest_run_002
 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when the input tensor descriptions hold null TensorDesc pointers.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_run_002, TestSize.Level0)
{
    LOGE("Run nnexecutortest_run_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedModel = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedModel.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;

    nnExecutor->SetInput(m_index, tensor, buffer, length);
    nnExecutor->SetOutput(m_index, buffer, length);
    OH_NN_ReturnCode ret = nnExecutor->Run();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedModel.get());
}

/**
 * @tc.name: nnexecutortest_run_003
 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when both input and output tensor descriptions hold null TensorDesc pointers.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_run_003, TestSize.Level0)
{
    LOGE("Run nnexecutortest_run_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedModel = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedModel.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    size_t length = 9 * sizeof(float);
    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;

    nnExecutor->SetInput(m_index, tensor, buffer, length);
    nnExecutor->SetOutput(m_index, buffer, length);
    OH_NN_ReturnCode ret = nnExecutor->Run();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

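/*
 * A condensed sketch of the set-input / set-output / run sequence the three
 * cases above share, built only from NNExecutor calls already exercised in
 * this file. The helper is illustrative only and is not called by any test.
 */
static OH_NN_ReturnCode SetBuffersAndRun(NNExecutor* executor, uint32_t index,
    const OH_NN_Tensor& tensor, void* buffer, size_t length)
{
    if (executor == nullptr || buffer == nullptr) {
        return OH_NN_INVALID_PARAMETER;
    }
    // The tests reuse one buffer for both input and output; real callers
    // would typically pass distinct buffers.
    executor->SetInput(index, tensor, buffer, length);
    executor->SetOutput(index, buffer, length);
    return executor->Run();
}
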
/**
 * @tc.name: nnexecutortest_setextensionconfig_001
 * @tc.desc: Verify that SetExtensionConfig accepts the supported keys and that GetExecutorConfig then returns a non-null config.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setextensionconfig_001, TestSize.Level0)
{
    LOGE("SetExtensionConfig nnexecutortest_setextensionconfig_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedModel = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedModel.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    ExtensionConfig extensionConfig;
    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
        false, performance, priority);

    std::unordered_map<std::string, std::vector<char>> configMap;
    std::string callingPidStr = "callingPid";
    std::vector<char> vecCallingPid(callingPidStr.begin(), callingPidStr.end());
    configMap["callingPid"] = vecCallingPid;

    std::string hiaiModelIdStr = "hiaiModelId";
    std::vector<char> vecHiaiModelId(hiaiModelIdStr.begin(), hiaiModelIdStr.end());
    configMap["hiaiModelId"] = vecHiaiModelId;

    std::string needLatencyStr = "isNeedModelLatency";
    std::vector<char> vecNeedLatency(needLatencyStr.begin(), needLatencyStr.end());
    configMap["isNeedModelLatency"] = vecNeedLatency;
    OH_NN_ReturnCode retSetExtensionConfig = nnExecutor->SetExtensionConfig(configMap);
    EXPECT_EQ(OH_NN_SUCCESS, retSetExtensionConfig);

    ExecutorConfig* retGetExecutorConfig = nnExecutor->GetExecutorConfig();
    EXPECT_NE(nullptr, retGetExecutorConfig);

    testing::Mock::AllowLeak(mockIPreparedModel.get());
}
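
/*
 * A small sketch of assembling the byte-vector map that SetExtensionConfig
 * consumes; the keys exercised above are "callingPid", "hiaiModelId" and
 * "isNeedModelLatency". The helper is illustrative only and is not called
 * by any test.
 */
static std::unordered_map<std::string, std::vector<char>> BuildExtensionConfig(
    const std::unordered_map<std::string, std::string>& entries)
{
    std::unordered_map<std::string, std::vector<char>> configMap;
    for (const auto& entry : entries) {
        // Each value is stored as its raw character bytes, matching the test above.
        configMap[entry.first] = std::vector<char>(entry.second.begin(), entry.second.end());
    }
    return configMap;
}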
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS