/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <v2_0/innrt_device.h>
#include <v2_0/iprepared_model.h>
#include <v2_0/nnrt_types.h>
#include <vector>

#include "mindir.h"
#include "mindir_lite_graph.h"
#include "gtest/gtest.h"

#include "common/hdi_nnrt_test.h"
#include "common/hdi_nnrt_test_utils.h"
#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"

using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;

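// DeviceTestAdditional inherits the HDINNRtTest fixture from
// common/hdi_nnrt_test.h, which (as used throughout this file) is assumed to
// acquire the V2.0 INnrtDevice proxy during SetUp() and expose it as device_.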
namespace {

class DeviceTestAdditional : public HDINNRtTest {};

} // namespace

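// Each "stability" test below repeats the same HDI call 100 times in a row and
// requires NNRT_SUCCESS on every iteration.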
/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_GetDeviceNameV2_0200
 * @tc.name: testNnrtGetDeviceNameV2_001
 * @tc.desc: Call function V2 GetDeviceName, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetDeviceNameV2_001, Function | MediumTest | Level1)
{
    std::string deviceName = "abc";
    auto hdiRet = 0;

    for (int i = 0; i < 100; i++) {
        hdiRet = device_->GetDeviceName(deviceName);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
        std::cout << "deviceName:" << deviceName << std::endl;
        ASSERT_TRUE(!deviceName.empty());
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetDeviceNameV2_0300
 * @tc.name: testNnrtGetDeviceNameV2_002
 * @tc.desc: Call function V2 GetDeviceName, deviceName is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetDeviceNameV2_002, Function | MediumTest | Level2)
{
    // Constructing std::string from nullptr is undefined behavior, so an empty
    // string stands in for the original "deviceName is nullptr" case.
    std::string deviceName;
    auto hdiRet = device_->GetDeviceName(deviceName);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, hdiRet) << hdiRet;
    std::cout << "deviceName:" << deviceName << std::endl;
    ASSERT_TRUE(!deviceName.empty());
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_GetVendorNameV2_0200
 * @tc.name: testNnrtGetVendorNameV2_001
 * @tc.desc: Call function V2 GetVendorName, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetVendorNameV2_001, Function | MediumTest | Level1)
{
    std::string vendorName = "abc";
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->GetVendorName(vendorName);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
        std::cout << "vendorName:" << vendorName << std::endl;
        ASSERT_TRUE(!vendorName.empty());
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetVendorNameV2_0300
 * @tc.name: testNnrtGetVendorNameV2_002
 * @tc.desc: Call function V2 GetVendorName, vendorName is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetVendorNameV2_002, Function | MediumTest | Level2)
{
    // Constructing std::string from nullptr is undefined behavior, so an empty
    // string stands in for the original "vendorName is nullptr" case.
    std::string vendorName;
    auto hdiRet = device_->GetVendorName(vendorName);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, hdiRet) << hdiRet;
    std::cout << "vendorName:" << vendorName << std::endl;
    ASSERT_TRUE(!vendorName.empty());
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_GetDeviceTypeV2_0200
 * @tc.name: testNnrtGetDeviceTypeV2_001
 * @tc.desc: Call function V2 GetDeviceType, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetDeviceTypeV2_001, Function | MediumTest | Level1)
{
    V2_0::DeviceType deviceType = V2_0::DeviceType::CPU;
    auto hdiRet = 0;

    for (int i = 0; i < 100; i++) {
        hdiRet = device_->GetDeviceType(deviceType);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet);

        ASSERT_TRUE(deviceType == V2_0::DeviceType::OTHER || deviceType == V2_0::DeviceType::GPU ||
                    deviceType == V2_0::DeviceType::CPU || deviceType == V2_0::DeviceType::ACCELERATOR)
            << deviceType;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_GetDeviceStatusV2_0200
 * @tc.name: testNnrtGetDeviceStatusV2_001
 * @tc.desc: Call function V2 GetDeviceStatus, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetDeviceStatusV2_001, Function | MediumTest | Level1)
{
    V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::OFFLINE;
    auto hdiRet = 0;

    for (int i = 0; i < 100; i++) {
        hdiRet = device_->GetDeviceStatus(deviceStatus);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet);
        ASSERT_TRUE(deviceStatus == V2_0::DeviceStatus::AVAILABLE || deviceStatus == V2_0::DeviceStatus::BUSY ||
                    deviceStatus == V2_0::DeviceStatus::OFFLINE || deviceStatus == V2_0::DeviceStatus::UNKNOWN)
            << deviceStatus;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_IsFloat16PrecisionSupportedV2_0200
 * @tc.name: testNnrtIsFloat16PrecisionSupportedV2_001
 * @tc.desc: Call function V2 IsFloat16PrecisionSupported, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsFloat16PrecisionSupportedV2_001, Function | MediumTest | Level1)
{
    bool isSupportedFp16 = true;
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->IsFloat16PrecisionSupported(isSupportedFp16);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_IsFloat16PrecisionSupportedV2_0300
 * @tc.name: testNnrtIsFloat16PrecisionSupportedV2_002
 * @tc.desc: Call function V2 IsFloat16PrecisionSupported, isSupportedFp16 is false
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsFloat16PrecisionSupportedV2_002, Function | MediumTest | Level1)
{
    bool isSupportedFp16 = false;
    auto hdiRet = device_->IsFloat16PrecisionSupported(isSupportedFp16);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_IsPerformanceModeSupportedV2_0200
 * @tc.name: testNnrtIsPerformanceModeSupportedV2_001
 * @tc.desc: Call function V2 IsPerformanceModeSupported, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsPerformanceModeSupportedV2_001, Function | MediumTest | Level1)
{
    bool isSupportedPerformance = true;
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->IsPerformanceModeSupported(isSupportedPerformance);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_IsPerformanceModeSupportedV2_0300
 * @tc.name: testNnrtIsPerformanceModeSupportedV2_002
 * @tc.desc: Call function V2 IsPerformanceModeSupported, isSupportedPerformance is false
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsPerformanceModeSupportedV2_002, Function | MediumTest | Level1)
{
    bool isSupportedPerformance = false;
    auto hdiRet = device_->IsPerformanceModeSupported(isSupportedPerformance);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_IsPrioritySupportedV2_0200
 * @tc.name: testNnrtIsPrioritySupportedV2_001
 * @tc.desc: Call function V2 IsPrioritySupported, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsPrioritySupportedV2_001, Function | MediumTest | Level1)
{
    bool isSupportedPriority = true;
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->IsPrioritySupported(isSupportedPriority);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_IsPrioritySupportedV2_0300
 * @tc.name: testNnrtIsPrioritySupportedV2_002
 * @tc.desc: Call function V2 IsPrioritySupported, isSupportedPriority is false
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsPrioritySupportedV2_002, Function | MediumTest | Level1)
{
    bool isSupportedPriority = false;
    auto hdiRet = device_->IsPrioritySupported(isSupportedPriority);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_IsDynamicInputSupportedV2_0200
 * @tc.name: testNnrtIsDynamicInputSupportedV2_001
 * @tc.desc: Call function V2 IsDynamicInputSupported, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsDynamicInputSupportedV2_001, Function | MediumTest | Level1)
{
    bool isSupportedDynamicInput = true;
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->IsDynamicInputSupported(isSupportedDynamicInput);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_IsDynamicInputSupportedV2_0300
 * @tc.name: testNnrtIsDynamicInputSupportedV2_002
 * @tc.desc: Call function V2 IsDynamicInputSupported, isSupportedDynamicInput is false
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsDynamicInputSupportedV2_002, Function | MediumTest | Level1)
{
    bool isSupportedDynamicInput = false;
    auto hdiRet = device_->IsDynamicInputSupported(isSupportedDynamicInput);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_IsModelCacheSupportedV2_0200
 * @tc.name: testNnrtIsModelCacheSupportedV2_001
 * @tc.desc: Call function V2 IsModelCacheSupported, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsModelCacheSupportedV2_001, Function | MediumTest | Level1)
{
    bool isSupportedCache = true;
    auto hdiRet = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->IsModelCacheSupported(isSupportedCache);
        ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_IsModelCacheSupportedV2_0300
 * @tc.name: testNnrtIsModelCacheSupportedV2_002
 * @tc.desc: Call function V2 IsModelCacheSupported, isSupportedCache is false
 */
HWTEST_F(DeviceTestAdditional, testNnrtIsModelCacheSupportedV2_002, Function | MediumTest | Level1)
{
    bool isSupportedCache = false;
    auto hdiRet = device_->IsModelCacheSupported(isSupportedCache);
    ASSERT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
}

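// The GetSupportedOperation tests below all follow one pattern: build a small
// Add graph, convert it to a V2.0 HDI model, overwrite every node's nodeType
// with the type under test, then query device_->GetSupportedOperation().
// Whether a given type is expected as supported (true) or unsupported (false)
// reflects the capabilities of the NNRt device under test.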
/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0400
 * @tc.name: testNnrtGetSupportedOperationV2_001
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_NONE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_001, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_NONE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0500
 * @tc.name: testNnrtGetSupportedOperationV2_002
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_ACTIVATION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_002, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_ACTIVATION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0600
 * @tc.name: testNnrtGetSupportedOperationV2_003
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_ADD_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_003, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_ADD_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0700
 * @tc.name: testNnrtGetSupportedOperationV2_004
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_ARGMAX_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_004, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_ARGMAX_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0800
 * @tc.name: testNnrtGetSupportedOperationV2_005
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_AVG_POOL_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_005, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_AVG_POOL_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_0900
 * @tc.name: testNnrtGetSupportedOperationV2_006
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_BATCH_TO_SPACE_ND
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_006, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_BATCH_TO_SPACE_ND);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1000
 * @tc.name: testNnrtGetSupportedOperationV2_007
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_BIAS_ADD
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_007, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_BIAS_ADD);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1100
 * @tc.name: testNnrtGetSupportedOperationV2_008
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_CAST
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_008, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_CAST);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1200
 * @tc.name: testNnrtGetSupportedOperationV2_009
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_CONCAT
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_009, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_CONCAT);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1300
 * @tc.name: testNnrtGetSupportedOperationV2_010
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_CONV2D_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_010, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_CONV2D_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1400
 * @tc.name: testNnrtGetSupportedOperationV2_011
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_CONV2D_TRANSPOSE_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_011, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_CONV2D_TRANSPOSE_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1500
 * @tc.name: testNnrtGetSupportedOperationV2_012
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_DIV_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_012, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_DIV_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1600
 * @tc.name: testNnrtGetSupportedOperationV2_013
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_ELTWISE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_013, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_ELTWISE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1700
 * @tc.name: testNnrtGetSupportedOperationV2_014
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_EXPAND_DIMS
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_014, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_EXPAND_DIMS);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1800
 * @tc.name: testNnrtGetSupportedOperationV2_015
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_FILL
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_015, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_FILL);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_1900
 * @tc.name: testNnrtGetSupportedOperationV2_016
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_FULL_CONNECTION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_016, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_FULL_CONNECTION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2000
 * @tc.name: testNnrtGetSupportedOperationV2_017
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_FUSED_BATCH_NORM
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_017, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_FUSED_BATCH_NORM);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2100
 * @tc.name: testNnrtGetSupportedOperationV2_018
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_GATHER
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_018, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_GATHER);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2200
 * @tc.name: testNnrtGetSupportedOperationV2_019
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_LAYER_NORM_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_019, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_LAYER_NORM_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2300
 * @tc.name: testNnrtGetSupportedOperationV2_020
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_LESS_EQUAL
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_020, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_LESS_EQUAL);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2400
 * @tc.name: testNnrtGetSupportedOperationV2_021
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_MATMUL_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_021, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_MATMUL_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2500
 * @tc.name: testNnrtGetSupportedOperationV2_022
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_MAXIMUM
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_022, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_MAXIMUM);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2600
 * @tc.name: testNnrtGetSupportedOperationV2_023
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_MAX_POOL_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_023, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_MAX_POOL_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2700
 * @tc.name: testNnrtGetSupportedOperationV2_024
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_MUL_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_024, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_MUL_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2800
 * @tc.name: testNnrtGetSupportedOperationV2_025
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_ONE_HOT
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_025, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_ONE_HOT);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_2900
 * @tc.name: testNnrtGetSupportedOperationV2_026
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_PAD_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_026, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_PAD_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3000
 * @tc.name: testNnrtGetSupportedOperationV2_027
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_POW_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_027, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_POW_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3100
 * @tc.name: testNnrtGetSupportedOperationV2_028
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_PRELU_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_028, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_PRELU_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3200
 * @tc.name: testNnrtGetSupportedOperationV2_029
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_QUANT_DTYPE_CAST
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_029, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_QUANT_DTYPE_CAST);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3300
 * @tc.name: testNnrtGetSupportedOperationV2_030
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_REDUCE_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_030, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_REDUCE_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(false, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3400
 * @tc.name: testNnrtGetSupportedOperationV2_031
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_RESHAPE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_031, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_RESHAPE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_EQ(true, supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3500
 * @tc.name: testNnrtGetSupportedOperationV2_032
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_RESIZE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_032, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_RESIZE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3600
 * @tc.name: testNnrtGetSupportedOperationV2_033
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_RSQRT
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_033, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_RSQRT);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3700
 * @tc.name: testNnrtGetSupportedOperationV2_034
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SCALE_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_034, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SCALE_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3800
 * @tc.name: testNnrtGetSupportedOperationV2_035
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SHAPE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_035, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SHAPE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_3900
 * @tc.name: testNnrtGetSupportedOperationV2_036
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SLICE_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_036, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SLICE_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4000
 * @tc.name: testNnrtGetSupportedOperationV2_037
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SOFTMAX
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_037, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SOFTMAX);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4100
 * @tc.name: testNnrtGetSupportedOperationV2_038
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SPACE_TO_BATCH_ND
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_038, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SPACE_TO_BATCH_ND);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4200
 * @tc.name: testNnrtGetSupportedOperationV2_039
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SPLIT
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_039, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SPLIT);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4300
 * @tc.name: testNnrtGetSupportedOperationV2_040
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SQRT
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_040, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SQRT);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4400
 * @tc.name: testNnrtGetSupportedOperationV2_041
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SQUEEZE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_041, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SQUEEZE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4500
 * @tc.name: testNnrtGetSupportedOperationV2_042
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SQUARED_DIFFERENCE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_042, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SQUARED_DIFFERENCE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4600
 * @tc.name: testNnrtGetSupportedOperationV2_043
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_STACK
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_043, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_STACK);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4700
 * @tc.name: testNnrtGetSupportedOperationV2_044
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_STRIDED_SLICE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_044, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_STRIDED_SLICE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4800
 * @tc.name: testNnrtGetSupportedOperationV2_045
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_SUB_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_045, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_SUB_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_4900
 * @tc.name: testNnrtGetSupportedOperationV2_046
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_TILE_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_046, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_TILE_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5000
 * @tc.name: testNnrtGetSupportedOperationV2_047
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_TOPK_FUSION
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_047, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_TOPK_FUSION);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5100
 * @tc.name: testNnrtGetSupportedOperationV2_048
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_TRANSPOSE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_048, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_TRANSPOSE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5200
 * @tc.name: testNnrtGetSupportedOperationV2_049
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeType is NODE_TYPE_UNSQUEEZE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_049, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V2_0::NodeType>(mindspore::lite::NODE_TYPE_UNSQUEEZE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_FALSE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5300
 * @tc.name: testNnrtGetSupportedOperationV2_050
 * @tc.desc: Call function V2 GetSupportedOperation, node.quantType is QUANT_TYPE_NONE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_050, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.quantType = static_cast<V2_0::QuantType>(mindspore::lite::QUANT_TYPE_NONE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5400
 * @tc.name: testNnrtGetSupportedOperationV2_051
 * @tc.desc: Call function V2 GetSupportedOperation, node.quantType is QUANT_TYPE_ALL
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_051, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.quantType = static_cast<V2_0::QuantType>(mindspore::lite::QUANT_TYPE_ALL);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5500
 * @tc.name: testNnrtGetSupportedOperationV2_052
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_UNKNOWN
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_052, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_UNKNOWN);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5600
 * @tc.name: testNnrtGetSupportedOperationV2_053
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_BOOL
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_053, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_BOOL);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5700
 * @tc.name: testNnrtGetSupportedOperationV2_054
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_INT8
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_054, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_INT8);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5800
 * @tc.name: testNnrtGetSupportedOperationV2_055
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_INT16
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_055, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_INT16);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_5900
 * @tc.name: testNnrtGetSupportedOperationV2_056
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_INT32
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_056, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_INT32);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6000
 * @tc.name: testNnrtGetSupportedOperationV2_057
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_INT64
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_057, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_INT64);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6100
 * @tc.name: testNnrtGetSupportedOperationV2_058
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_UINT8
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_058, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_UINT8);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6200
 * @tc.name: testNnrtGetSupportedOperationV2_059
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_UINT16
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_059, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_UINT16);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6300
 * @tc.name: testNnrtGetSupportedOperationV2_060
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_UINT32
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_060, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_UINT32);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6400
 * @tc.name: testNnrtGetSupportedOperationV2_061
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_UINT64
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_061, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_UINT64);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6500
 * @tc.name: testNnrtGetSupportedOperationV2_062
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_FLOAT16
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_062, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_FLOAT16);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6600
 * @tc.name: testNnrtGetSupportedOperationV2_063
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_FLOAT32
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_063, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_FLOAT32);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6700
 * @tc.name: testNnrtGetSupportedOperationV2_064
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is DATA_TYPE_FLOAT64
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_064, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dataType = static_cast<V2_0::DataType>(mindspore::lite::DATA_TYPE_FLOAT64);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6800
 * @tc.name: testNnrtGetSupportedOperationV2_065
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.format is FORMAT_NONE
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_065, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.format = static_cast<V2_0::Format>(OHOS::HDI::Nnrt::V2_0::FORMAT_NONE);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_6900
 * @tc.name: testNnrtGetSupportedOperationV2_066
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.format is FORMAT_NCHW
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_066, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.format = static_cast<V2_0::Format>(mindspore::lite::FORMAT_NCHW);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7000
 * @tc.name: testNnrtGetSupportedOperationV2_067
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.format is FORMAT_NHWC
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_067, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.format = static_cast<V2_0::Format>(mindspore::lite::FORMAT_NHWC);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

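/*
 * The next four tests (testNnrtGetSupportedOperationV2_068 to _071) clear an
 * entire model field (allTensors, subGraph, inputIndex, outputIndex) instead
 * of rewriting individual nodes or tensors. They only assert that
 * GetSupportedOperation still returns NNRT_SUCCESS; with the graph structure
 * removed there is no per-operation flag left to check.
 */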
/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7100
 * @tc.name: testNnrtGetSupportedOperationV2_068
 * @tc.desc: Call function V2 GetSupportedOperation, allTensors is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_068, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    iModel->allTensors = {};

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7200
 * @tc.name: testNnrtGetSupportedOperationV2_069
 * @tc.desc: Call function V2 GetSupportedOperation, subGraph is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_069, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    iModel->subGraph = {};

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7300
 * @tc.name: testNnrtGetSupportedOperationV2_070
 * @tc.desc: Call function V2 GetSupportedOperation, inputIndex is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_070, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    iModel->inputIndex = {};

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7400
 * @tc.name: testNnrtGetSupportedOperationV2_071
 * @tc.desc: Call function V2 GetSupportedOperation, outputIndex is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_071, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    iModel->outputIndex = {};

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

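/*
 * The next six tests (testNnrtGetSupportedOperationV2_072 to _077) feed
 * deliberately out-of-range enum values (for example quantType = 100000 or
 * dataType = -1) into the converted model. They still expect NNRT_SUCCESS and
 * every operation to be reported as supported: an unknown field value must not
 * make GetSupportedOperation fail.
 */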
/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7500
 * @tc.name: testNnrtGetSupportedOperationV2_072
 * @tc.desc: Call function V2 GetSupportedOperation, node.quantType is 100000
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_072, Function | MediumTest | Level1)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.quantType = static_cast<V2_0::QuantType>(100000);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

2488 /**
2489  * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7600
2490  * @tc.name: testNnrtGetSupportedOperationV2_073
2491  * @tc.desc: Call function V2 GetSupportedOperation, node.quantType is -1
2492  */
2493 HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_073, Function | MediumTest | Level2)
2494 {
2495     OH_NNModel *model = nullptr;
2496     HDICommon::BuildAddGraph(&model);
2497     ASSERT_NE(model, nullptr);
2498 
2499     V2_0::Model *iModel = nullptr;
2500     V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
2501     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
2502 
2503     for (auto &node : iModel->nodes) {
2504         node.quantType = static_cast<V2_0::QuantType>(-1);
2505     }
2506 
2507     std::vector<bool> supportedOperations;
2508     EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
2509     for (uint32_t i = 0; i < supportedOperations.size(); i++) {
2510         EXPECT_EQ(true, supportedOperations[i]);
2511     }
2512 
2513     OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
2514     if (tensorBuffer.fd != -1) {
2515         EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
2516     }
2517 }
2518 
2519 /**
2520  * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7700
2521  * @tc.name: testNnrtGetSupportedOperationV2_074
2522  * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is -1
2523  */
2524 HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_074, Function | MediumTest | Level2)
2525 {
2526     OH_NNModel *model = nullptr;
2527     HDICommon::BuildAddGraph(&model);
2528     ASSERT_NE(model, nullptr);
2529 
2530     V2_0::Model *iModel = nullptr;
2531     V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
2532     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
2533 
2534     for (auto &tensor : iModel->allTensors) {
2535         tensor.dataType = static_cast<V2_0::DataType>(-1);
2536     }
2537 
2538     std::vector<bool> supportedOperations;
2539     EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
2540     for (uint32_t i = 0; i < supportedOperations.size(); i++) {
2541         EXPECT_EQ(true, supportedOperations[i]);
2542     }
2543 
2544     OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
2545     if (tensorBuffer.fd != -1) {
2546         EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
2547     }
2548 }
2549 
2550 /**
2551  * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7800
2552  * @tc.name: testNnrtGetSupportedOperationV2_075
2553  * @tc.desc: Call function V2 GetSupportedOperation, tensor.dataType is 10000
2554  */
2555 HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_075, Function | MediumTest | Level2)
2556 {
2557     OH_NNModel *model = nullptr;
2558     HDICommon::BuildAddGraph(&model);
2559     ASSERT_NE(model, nullptr);
2560 
2561     V2_0::Model *iModel = nullptr;
2562     V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
2563     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
2564 
2565     for (auto &tensor : iModel->allTensors) {
2566         tensor.dataType = static_cast<V2_0::DataType>(10000);
2567     }
2568 
2569     std::vector<bool> supportedOperations;
2570     EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
2571     for (uint32_t i = 0; i < supportedOperations.size(); i++) {
2572         EXPECT_EQ(true, supportedOperations[i]);
2573     }
2574 
2575     OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
2576     if (tensorBuffer.fd != -1) {
2577         EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
2578     }
2579 }

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_7900
 * @tc.name: testNnrtGetSupportedOperationV2_076
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.format is -1
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_076, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.format = static_cast<V2_0::Format>(-1);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8000
 * @tc.name: testNnrtGetSupportedOperationV2_077
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.format is 10000
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_077, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.format = static_cast<V2_0::Format>(10000);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    for (uint32_t i = 0; i < supportedOperations.size(); i++) {
        EXPECT_TRUE(supportedOperations[i]);
    }

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}
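
/*
 * tensor.format gets the same treatment as tensor.dataType above: forcing
 * the enum to -1 or 10000 must not change the outcome of the support query.
 */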

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8100
 * @tc.name: testNnrtGetSupportedOperationV2_078
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.dims is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_078, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.dims = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8200
 * @tc.name: testNnrtGetSupportedOperationV2_079
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.data is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_079, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.data = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8300
 * @tc.name: testNnrtGetSupportedOperationV2_080
 * @tc.desc: Call function V2 GetSupportedOperation, tensor.quantParams is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_080, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &tensor : iModel->allTensors) {
        tensor.quantParams = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}
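
/*
 * Unlike the enum-corruption cases, the empty-field cases (dims, data,
 * quantParams) only assert the NNRT_SUCCESS return code and no longer loop
 * over supportedOperations: clearing optional tensor metadata must not make
 * the query itself fail, but no claim is made about per-op support.
 */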

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8400
 * @tc.name: testNnrtGetSupportedOperationV2_081
 * @tc.desc: Call function V2 GetSupportedOperation, node.nodeAttr is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_081, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.nodeAttr = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8500
 * @tc.name: testNnrtGetSupportedOperationV2_082
 * @tc.desc: Call function V2 GetSupportedOperation, node.inputIndex is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_082, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.inputIndex = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8600
 * @tc.name: testNnrtGetSupportedOperationV2_083
 * @tc.desc: Call function V2 GetSupportedOperation, node.outputIndex is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_083, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &node : iModel->nodes) {
        node.outputIndex = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}
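
/*
 * The same pattern applies per node: emptying nodeAttr, inputIndex, or
 * outputIndex leaves the support query returning NNRT_SUCCESS.
 */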

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8700
 * @tc.name: testNnrtGetSupportedOperationV2_084
 * @tc.desc: Call function V2 GetSupportedOperation, subgraph.inputIndices is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_084, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &subgraph : iModel->subGraph) {
        subgraph.inputIndices = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8800
 * @tc.name: testNnrtGetSupportedOperationV2_085
 * @tc.desc: Call function V2 GetSupportedOperation, subgraph.outputIndices is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_085, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &subgraph : iModel->subGraph) {
        subgraph.outputIndices = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_GetSupportedOperationV2_8900
 * @tc.name: testNnrtGetSupportedOperationV2_086
 * @tc.desc: Call function V2 GetSupportedOperation, subgraph.nodeIndices is empty
 */
HWTEST_F(DeviceTestAdditional, testNnrtGetSupportedOperationV2_086, Function | MediumTest | Level2)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V2_0::Model *iModel = nullptr;
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));

    for (auto &subgraph : iModel->subGraph) {
        subgraph.nodeIndices = {};
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));

    OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}
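
/*
 * All of the GetSupportedOperation mutation cases above share one skeleton:
 * build the Add graph, convert it, clobber a single model field, query
 * support, then clean up. A minimal sketch of a shared helper -- hypothetical,
 * not part of this suite -- could collapse each case to a one-line call:
 *
 *   template <typename Device, typename Mutator>
 *   void CheckSupportAfterMutation(Device &device, Mutator mutate)
 *   {
 *       OH_NNModel *model = nullptr;
 *       HDICommon::BuildAddGraph(&model);
 *       ASSERT_NE(model, nullptr);
 *
 *       V2_0::Model *iModel = nullptr;
 *       V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
 *       ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device, model, tensorBuffer, &iModel));
 *
 *       // Apply the per-case corruption, e.g.
 *       // [](V2_0::Model &m) { for (auto &t : m.allTensors) { t.dims = {}; } }
 *       mutate(*iModel);
 *
 *       std::vector<bool> supportedOperations;
 *       EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS,
 *                 device->GetSupportedOperation(*iModel, supportedOperations));
 *
 *       OHOS::NeuralNetworkRuntime::V2::HDIModel_Destroy(&iModel);
 *       if (tensorBuffer.fd != -1) {
 *           EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, device->ReleaseBuffer(tensorBuffer));
 *       }
 *   }
 */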

/**
 * @tc.number: SUB_AI_Nnrt_Func_HDI_Device_AllocateBufferV2_0300
 * @tc.name: testNnrtAllocateBufferV2_001
 * @tc.desc: Call function V2 AllocateBuffer, tensorSize is -1
 */
HWTEST_F(DeviceTestAdditional, testNnrtAllocateBufferV2_001, Function | MediumTest | Level2)
{
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    size_t tensorSize = -1;
    auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
    EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_OUT_OF_MEMORY, hdiRet);
    EXPECT_FALSE(tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
        << tensorBuffer.fd << " " << tensorBuffer.bufferSize;

    auto hdiRet1 = device_->ReleaseBuffer(tensorBuffer);
    EXPECT_NE(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet1);
}
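
/*
 * In the case above, assigning -1 to the unsigned size_t parameter wraps to
 * SIZE_MAX, so the allocation is expected to fail with NNRT_OUT_OF_MEMORY
 * and releasing the untouched buffer (fd still NNRT_INVALID_FD) must also
 * fail.
 */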

/**
 * @tc.number: SUB_AI_Nnrt_Stability_HDI_Device_ReleaseBufferV2_0300
 * @tc.name: testNnrtReleaseBufferV2_001
 * @tc.desc: Call function V2 ReleaseBuffer, stability test
 */
HWTEST_F(DeviceTestAdditional, testNnrtReleaseBufferV2_001, Function | MediumTest | Level1)
{
    V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    size_t tensorSize = 1;
    auto hdiRet = 0;
    auto hdiRet1 = 0;
    for (int i = 0; i < 100; i++) {
        hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet) << hdiRet;
        EXPECT_TRUE(tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
            << tensorBuffer.fd << " " << tensorBuffer.bufferSize;

        hdiRet1 = device_->ReleaseBuffer(tensorBuffer);
        EXPECT_EQ(V2_0::NNRT_ReturnCode::NNRT_SUCCESS, hdiRet1);
    }
}
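
/*
 * Each of the 100 iterations above reuses the same SharedBuffer struct:
 * AllocateBuffer overwrites it with a fresh fd, and the immediate
 * ReleaseBuffer keeps the loop from leaking shared memory across iterations.
 */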