• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vector>
17 #include <v1_0/nnrt_types.h>
18 #include <v1_0/innrt_device.h>
19 #include <v1_0/iprepared_model.h>
20 
21 #include "gtest/gtest.h"
22 #include "mindir.h"
23 #include "mindir_lite_graph.h"
24 
25 #include "interfaces/kits/c/neural_network_runtime.h"
26 #include "common/hdi_nnrt_test_utils.h"
27 #include "common/hdi_nnrt_test.h"
28 
29 using namespace std;
30 using namespace testing::ext;
31 using namespace OHOS::NeuralNetworkRuntime;
32 using namespace OHOS::NeuralNetworkRuntime::Test;
33 
namespace {

// Fixture for model prepare / model-cache HDI tests; the HDINNRtTest base
// provides the shared NNRt device handle (device_) used by every case below.
class ModelPrepareTest : public HDINNRtTest {};

} // namespace
39 
40 /**
41  * @tc.number : SUB_AI_NNRt_Func_South_Model_ExportModelCache_0100
42  * @tc.name   : 模型已编译,导出cache
43  * @tc.desc   : [C- SOFTWARE -0200]
44  */
45 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_ExportModelCache_0100, Function | MediumTest | Level1)
46 {
47     bool isSupportedCache = false;
48     EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
49     if (!isSupportedCache) {
50         GTEST_SKIP() << "Export cache is not supported.";
51     }
52 
53     OH_NNModel *model = nullptr;
54     HDICommon::BuildAddGraph(&model);
55     ASSERT_NE(model, nullptr);
56 
57     V1_0::Model *iModel = nullptr;
58     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
59     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
60 
61     V1_0::ModelConfig config;
62     config.enableFloat16 = false;
63     config.mode = V1_0::PERFORMANCE_NONE;
64     config.priority = V1_0::PRIORITY_NONE;
65     // prepared model
66     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
67     EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
68     // export model cache
69     std::vector<V1_0::SharedBuffer> modelCache;
70     EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
71 
72     mindspore::lite::MindIR_Model_Destroy(&iModel);
73     if (tensorBuffer.fd != -1) {
74         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
75     }
76 }
77 
78 /**
79  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0100
80  * @tc.name   : 加载模型缓存,modelCache为空
81  * @tc.desc   : [C- SOFTWARE -0200]
82  */
83 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0100, Function | MediumTest | Level3)
84 {
85     bool isSupportedCache = false;
86     EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
87     if (!isSupportedCache) {
88         GTEST_SKIP() << "Export cache is not supported.";
89     }
90 
91     V1_0::ModelConfig config;
92     config.enableFloat16 = false;
93     config.mode = V1_0::PERFORMANCE_NONE;
94     config.priority = V1_0::PRIORITY_NONE;
95 
96     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
97     std::vector<V1_0::SharedBuffer> modelCache;
98     // prepared model with empty model cache
99     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel));
100 }
101 
102 /**
103  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0200
104  * @tc.name   : 加载模型缓存,modelCache不匹配
105  * @tc.desc   : [C- SOFTWARE -0200]
106  */
107 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0200, Function | MediumTest | Level3)
108 {
109     bool isSupportedCache = false;
110     EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
111     if (!isSupportedCache) {
112         GTEST_SKIP() << "Export cache is not supported.";
113     }
114 
115     OH_NNModel *model = nullptr;
116     HDICommon::BuildAddGraph(&model);
117     ASSERT_NE(model, nullptr);
118 
119     V1_0::Model *iModel = nullptr;
120     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
121     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
122 
123     V1_0::ModelConfig config;
124     config.enableFloat16 = false;
125     config.mode = V1_0::PERFORMANCE_NONE;
126     config.priority = V1_0::PRIORITY_NONE;
127     // export model cache
128     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
129     EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
130     std::vector<V1_0::SharedBuffer> modelCache;
131     EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
132 
133     // prepare model from invalid model cache
134     OHOS::HDI::Nnrt::V1_0::SharedBuffer invalidBuffer{NNRT_INVALID_FD, 0, 0, 0};
135     modelCache.emplace_back(invalidBuffer);
136     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel1;
137     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel1));
138 
139     // release
140     mindspore::lite::MindIR_Model_Destroy(&iModel);
141     if (tensorBuffer.fd != -1) {
142         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
143     }
144 }
145 
146 /**
147  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0300
148  * @tc.name   : 加载模型缓存,modelCache不完整
149  * @tc.desc   : [C- SOFTWARE -0200]
150  */
151 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0300, Function | MediumTest | Level3)
152 {
153     bool isSupportedCache = false;
154     EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
155     if (!isSupportedCache) {
156         GTEST_SKIP() << "Export cache is not supported.";
157     }
158 
159     OH_NNModel *model = nullptr;
160     HDICommon::BuildAddGraph(&model);
161     ASSERT_NE(model, nullptr);
162 
163     V1_0::Model *iModel = nullptr;
164     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
165     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
166 
167     V1_0::ModelConfig config;
168     config.enableFloat16 = false;
169     config.mode = V1_0::PERFORMANCE_NONE;
170     config.priority = V1_0::PRIORITY_NONE;
171     // export model cache
172     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
173     EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
174     std::vector<V1_0::SharedBuffer> modelCache;
175     EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
176     // prepare model from invalid model cache
177     modelCache.resize(size_t(modelCache.size() * 0.9));
178     OHOS::sptr<V1_0::IPreparedModel> iPreparedModel1;
179     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel1));
180 
181     // release
182     mindspore::lite::MindIR_Model_Destroy(&iModel);
183     if (tensorBuffer.fd != -1) {
184         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
185     }
186 }
187 
188 /**
189  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0100
190  * @tc.name   : 编译模型,model中inputIndex为空
191  * @tc.desc   : [C- SOFTWARE -0200]
192  */
193 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0100, Function | MediumTest | Level3)
194 {
195     OH_NNModel *model = nullptr;
196     HDICommon::BuildAddGraph(&model);
197     ASSERT_NE(model, nullptr);
198 
199     V1_0::Model *iModel = nullptr;
200     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
201     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
202     // set inputIndex to empty
203     iModel->inputIndex = {};
204     // prepare model with empty inputIndex
205     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
206     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
207     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
208 
209     // release
210     mindspore::lite::MindIR_Model_Destroy(&iModel);
211     if (tensorBuffer.fd != -1) {
212         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
213     }
214 }
215 /**
216  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0200
217  * @tc.name   : 编译模型,model中outputIndex为空
218  * @tc.desc   : [C- SOFTWARE -0200]
219  */
220 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0200, Function | MediumTest | Level3)
221 {
222     OH_NNModel *model = nullptr;
223     HDICommon::BuildAddGraph(&model);
224     ASSERT_NE(model, nullptr);
225 
226     V1_0::Model *iModel = nullptr;
227     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
228     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
229     // set outputIndex to empty
230     iModel->outputIndex = {};
231     // prepare model with empty outputIndex
232     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
233     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
234     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
235 
236     // release
237     mindspore::lite::MindIR_Model_Destroy(&iModel);
238     if (tensorBuffer.fd != -1) {
239         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
240     }
241 }
242 
243 /**
244  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0300
245  * @tc.name   : 编译模型,model中nodes为空
246  * @tc.desc   : [C- SOFTWARE -0200]
247  */
248 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0300, Function | MediumTest | Level3)
249 {
250     OH_NNModel *model = nullptr;
251     HDICommon::BuildAddGraph(&model);
252     ASSERT_NE(model, nullptr);
253 
254     V1_0::Model *iModel = nullptr;
255     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
256     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
257     // set nodes to empty
258     iModel->nodes = {};
259     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
260     // prepare model with empty nodes
261     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
262     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
263 
264     // release
265     mindspore::lite::MindIR_Model_Destroy(&iModel);
266     if (tensorBuffer.fd != -1) {
267         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
268     }
269 }
270 
271 /**
272  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0400
273  * @tc.name   : 编译模型,model中allTensors为空
274  * @tc.desc   : [C- SOFTWARE -0200]
275  */
276 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0400, Function | MediumTest | Level3)
277 {
278     OH_NNModel *model = nullptr;
279     HDICommon::BuildAddGraph(&model);
280     ASSERT_NE(model, nullptr);
281 
282     V1_0::Model *iModel = nullptr;
283     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
284     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
285     // set Model.allTensors empty
286     iModel->allTensors = {};
287     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
288     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
289     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
290 
291     // release
292     mindspore::lite::MindIR_Model_Destroy(&iModel);
293     if (tensorBuffer.fd != -1) {
294         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
295     }
296 }
297 
298 /**
299  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0500
300  * @tc.name   : 编译模型,Tensor的DataTyp为100000
301  * @tc.desc   : [C- SOFTWARE -0200]
302  */
303 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0500, Function | MediumTest | Level3)
304 {
305     OH_NNModel *model = nullptr;
306     HDICommon::BuildAddGraph(&model);
307     ASSERT_NE(model, nullptr);
308 
309     V1_0::Model *iModel = nullptr;
310     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
311     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
312     // set invalid Tensor.DataType
313     auto &Tensor = iModel->allTensors[0];
314     Tensor.dataType = static_cast<V1_0::DataType>(100000);
315     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
316     // prepare model
317     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
318     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
319 
320     // release
321     mindspore::lite::MindIR_Model_Destroy(&iModel);
322     if (tensorBuffer.fd != -1) {
323         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
324     }
325 }
326 
327 /**
328  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0600
329  * @tc.name   : 编译模型,Tensor的Format值为100000
330  * @tc.desc   : [C- SOFTWARE -0200]
331  */
332 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0600, Function | MediumTest | Level3)
333 {
334     OH_NNModel *model = nullptr;
335     HDICommon::BuildAddGraph(&model);
336     ASSERT_NE(model, nullptr);
337 
338     V1_0::Model *iModel = nullptr;
339     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
340     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
341     // set invalid Tensor.Format
342     auto &Tensor = iModel->allTensors[0];
343     Tensor.format = static_cast<V1_0::Format>(100000);
344     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
345     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
346     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
347 
348     // release
349     mindspore::lite::MindIR_Model_Destroy(&iModel);
350     if (tensorBuffer.fd != -1) {
351         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
352     }
353 }
354 
355 /**
356  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0700
357  * @tc.name   : 编译模型,model中subGraph为空
358  * @tc.desc   : [C- SOFTWARE -0200]
359  */
360 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0700, Function | MediumTest | Level3)
361 {
362     OH_NNModel *model = nullptr;
363     HDICommon::BuildAddGraph(&model);
364     ASSERT_NE(model, nullptr);
365 
366     V1_0::Model *iModel = nullptr;
367     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
368     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
369     // set empty Model.subGraph
370     iModel->subGraph = {};
371     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
372     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
373     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
374 
375     // release
376     mindspore::lite::MindIR_Model_Destroy(&iModel);
377     if (tensorBuffer.fd != -1) {
378         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
379     }
380 }
381 
382 /**
383  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0800
384  * @tc.name   : 编译模型,model中subGraph输入输出错误
385  * @tc.desc   : [C- SOFTWARE -0200]
386  */
387 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0800, Function | MediumTest | Level3)
388 {
389     OH_NNModel *model = nullptr;
390     HDICommon::BuildAddGraph(&model);
391     ASSERT_NE(model, nullptr);
392 
393     V1_0::Model *iModel = nullptr;
394     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
395     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
396     // set wrong input of subGraph
397     auto &subGraph = iModel->subGraph[0];
398     subGraph.inputIndices = {0, 1, 3};
399     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_NONE, V1_0::PRIORITY_NONE};
400     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
401     EXPECT_EQ(HDF_FAILURE, device_->PrepareModel(*iModel, modelConfig, preparedModel));
402 
403     // release
404     mindspore::lite::MindIR_Model_Destroy(&iModel);
405     if (tensorBuffer.fd != -1) {
406         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
407     }
408 }
409 
410 /**
411  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0900
412  * @tc.name   : 编译模型,config中mode为PERFORMANCE_NONE-1
413  * @tc.desc   : [C- SOFTWARE -0200]
414  */
415 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0900, Function | MediumTest | Level3)
416 {
417     OH_NNModel *model = nullptr;
418     HDICommon::BuildAddGraph(&model);
419     ASSERT_NE(model, nullptr);
420 
421     V1_0::Model *iModel = nullptr;
422     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
423     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
424     V1_0::ModelConfig modelConfig{true, static_cast<V1_0::PerformanceMode>(V1_0::PERFORMANCE_NONE - 1),
425                                   V1_0::PRIORITY_NONE};
426     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
427     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
428 
429     // release
430     mindspore::lite::MindIR_Model_Destroy(&iModel);
431     if (tensorBuffer.fd != -1) {
432         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
433     }
434 }
435 
436 /**
437  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1000
438  * @tc.name   : 编译模型,config中mode为PERFORMANCE_EXTREME+1
439  * @tc.desc   : [C- SOFTWARE -0200]
440  */
441 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1000, Function | MediumTest | Level3)
442 {
443     OH_NNModel *model = nullptr;
444     HDICommon::BuildAddGraph(&model);
445     ASSERT_NE(model, nullptr);
446 
447     V1_0::Model *iModel = nullptr;
448     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
449     EXPECT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
450     V1_0::ModelConfig modelConfig{true, static_cast<V1_0::PerformanceMode>(V1_0::PERFORMANCE_EXTREME + 1),
451                                   V1_0::PRIORITY_NONE};
452     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
453     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
454 
455     // release
456     mindspore::lite::MindIR_Model_Destroy(&iModel);
457     if (tensorBuffer.fd != -1) {
458         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
459     }
460 }
461 
462 /**
463  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1100
464  * @tc.name   : 编译模型,config中priority为PRIORITY_NONE-1
465  * @tc.desc   : [C- SOFTWARE -0200]
466  */
467 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1100, Function | MediumTest | Level3)
468 {
469     OH_NNModel *model = nullptr;
470     HDICommon::BuildAddGraph(&model);
471     ASSERT_NE(model, nullptr);
472 
473     V1_0::Model *iModel = nullptr;
474     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
475     EXPECT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
476     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_EXTREME,
477                                   static_cast<V1_0::Priority>(V1_0::PRIORITY_NONE - 1)};
478     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
479     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
480 
481     // release
482     mindspore::lite::MindIR_Model_Destroy(&iModel);
483     if (tensorBuffer.fd != -1) {
484         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
485     }
486 }
487 
488 /**
489  * @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1200
490  * @tc.name   : 编译模型,config中priority为PRIORITY_HIGH+1
491  * @tc.desc   : [C- SOFTWARE -0200]
492  */
493 HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1200, Function | MediumTest | Level3)
494 {
495     OH_NNModel *model = nullptr;
496     HDICommon::BuildAddGraph(&model);
497     ASSERT_NE(model, nullptr);
498 
499     V1_0::Model *iModel = nullptr;
500     V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
501     ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
502     V1_0::ModelConfig modelConfig{true, V1_0::PERFORMANCE_EXTREME,
503                                   static_cast<V1_0::Priority>(V1_0::PRIORITY_HIGH + 1)};
504     V1_0::sptr<V1_0::IPreparedModel> preparedModel;
505     EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
506 
507     // release
508     mindspore::lite::MindIR_Model_Destroy(&iModel);
509     if (tensorBuffer.fd != -1) {
510         EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
511     }
512 }