• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
#include <thread>
#include <random>
#include <cstdlib>
#include <cstring>
#include <inttypes.h>
#include <securec.h>
#include "ohos_common.h"
#include "gtest/gtest.h"
#include "include/c_api/context_c.h"
#include "include/c_api/model_c.h"
#include "include/c_api/types_c.h"
#include "include/c_api/status_c.h"
#include "include/c_api/data_type_c.h"
#include "include/c_api/tensor_c.h"
#include "include/c_api/format_c.h"
29 
30 using namespace testing::ext;
31 
32 class MSLiteTest: public testing::Test {
33 protected:
SetUpTestCase(void)34     static void SetUpTestCase(void) {}
TearDownTestCase(void)35     static void TearDownTestCase(void) {}
SetUp()36     virtual void SetUp() {}
TearDown()37     virtual void TearDown() {}
38 };
39 
40 // function before callback
PrintBeforeCallback(const OH_AI_TensorHandleArray inputs,const OH_AI_TensorHandleArray outputs,const OH_AI_CallBackParam kernel_Info)41 bool PrintBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
42                          const OH_AI_CallBackParam kernel_Info) {
43     std::cout << "Before forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl;
44     return true;
45 }
46 
47 // function after callback
PrintAfterCallback(const OH_AI_TensorHandleArray inputs,const OH_AI_TensorHandleArray outputs,const OH_AI_CallBackParam kernel_Info)48 bool PrintAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
49                         const OH_AI_CallBackParam kernel_Info) {
50     std::cout << "After forwarding " << kernel_Info.node_name << " " << kernel_Info.node_type << std::endl;
51     return true;
52 }
53 
54 // add cpu device info
AddContextDeviceCPU(OH_AI_ContextHandle context)55 void AddContextDeviceCPU(OH_AI_ContextHandle context) {
56     OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
57     ASSERT_NE(cpu_device_info, nullptr);
58     OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info);
59     printf("==========device_type:%d\n", device_type);
60     ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU);
61     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
62 }
63 
IsNNRTAvailable()64 bool IsNNRTAvailable() {
65     size_t num = 0;
66     auto desc = OH_AI_GetAllNNRTDeviceDescs(&num);
67     if (desc == nullptr) {
68         return false;
69     }
70     auto type = OH_AI_GetTypeFromNNRTDeviceDesc(desc);
71     if (type != 1) {
72         return false;
73     }
74     OH_AI_DestroyAllNNRTDeviceDescs(&desc);
75     return true;
76 }
77 
IsNPU()78 bool IsNPU() {
79     size_t num = 0;
80     auto desc = OH_AI_GetAllNNRTDeviceDescs(&num);
81     if (desc == nullptr) {
82         return false;
83     }
84     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
85     const std::string npu_name_prefix = "NPU_";
86     if (strncmp(npu_name_prefix.c_str(), name, npu_name_prefix.size()) != 0) {
87         return false;
88     }
89     return true;
90 }
91 
92 // add nnrt device info
AddContextDeviceNNRT(OH_AI_ContextHandle context)93 void AddContextDeviceNNRT(OH_AI_ContextHandle context) {
94     size_t num = 0;
95     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
96     if (descs == nullptr) {
97         return;
98     }
99 
100     std::cout << "found " << num << " nnrt devices" << std::endl;
101     for (size_t i = 0; i < num; i++) {
102         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
103         ASSERT_NE(desc, nullptr);
104         auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc);
105         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
106         auto type = OH_AI_GetTypeFromNNRTDeviceDesc(desc);
107         std::cout << "NNRT device: id = " << id << ", name: " << name << ", type:" << type << std::endl;
108     }
109 
110     auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(descs);
111 
112     OH_AI_DeviceInfoHandle nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
113     ASSERT_NE(nnrt_device_info, nullptr);
114     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id);
115     OH_AI_DestroyAllNNRTDeviceDescs(&descs);
116 
117     OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(nnrt_device_info);
118     printf("==========device_type:%d\n", device_type);
119     ASSERT_EQ(device_type, OH_AI_DEVICETYPE_NNRT);
120 
121     OH_AI_DeviceInfoSetPerformanceMode(nnrt_device_info, OH_AI_PERFORMANCE_MEDIUM);
122     ASSERT_EQ(OH_AI_DeviceInfoGetPerformanceMode(nnrt_device_info), OH_AI_PERFORMANCE_MEDIUM);
123     OH_AI_DeviceInfoSetPriority(nnrt_device_info, OH_AI_PRIORITY_MEDIUM);
124     ASSERT_EQ(OH_AI_DeviceInfoGetPriority(nnrt_device_info), OH_AI_PRIORITY_MEDIUM);
125 
126     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
127 }
128 
129 // add nnrt device info
AddContextDeviceNNRTWithCache(OH_AI_ContextHandle context,const char * cache_path,const char * cache_version)130 void AddContextDeviceNNRTWithCache(OH_AI_ContextHandle context, const char *cache_path, const char *cache_version) {
131     size_t num = 0;
132     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
133     if (descs == nullptr) {
134         return;
135     }
136 
137     std::cout << "found " << num << " nnrt devices" << std::endl;
138     for (size_t i = 0; i < num; i++) {
139         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
140         ASSERT_NE(desc, nullptr);
141         auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc);
142         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
143         auto type = OH_AI_GetTypeFromNNRTDeviceDesc(desc);
144         std::cout << "NNRT device: id = " << id << ", name: " << name << ", type:" << type << std::endl;
145     }
146 
147     auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(descs);
148 
149     OH_AI_DeviceInfoHandle nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
150     ASSERT_NE(nnrt_device_info, nullptr);
151     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id);
152     OH_AI_DestroyAllNNRTDeviceDescs(&descs);
153 
154     OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(nnrt_device_info);
155     printf("==========device_type:%d\n", device_type);
156     ASSERT_EQ(device_type, OH_AI_DEVICETYPE_NNRT);
157 
158     OH_AI_DeviceInfoSetPerformanceMode(nnrt_device_info, OH_AI_PERFORMANCE_MEDIUM);
159     ASSERT_EQ(OH_AI_DeviceInfoGetPerformanceMode(nnrt_device_info), OH_AI_PERFORMANCE_MEDIUM);
160     OH_AI_DeviceInfoSetPriority(nnrt_device_info, OH_AI_PRIORITY_MEDIUM);
161     ASSERT_EQ(OH_AI_DeviceInfoGetPriority(nnrt_device_info), OH_AI_PRIORITY_MEDIUM);
162     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
163     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
164 
165     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
166 }
167 
// Adds an NNRT device info to `context`, creating it from the first device's
// *type* via OH_AI_CreateNNRTDeviceInfoByType, then verifies type and id.
void AddContextDeviceNNRTByType(OH_AI_ContextHandle context) {
    size_t num = 0;
    auto desc = OH_AI_GetAllNNRTDeviceDescs(&num);
    // `desc` is the head of the returned NNRTDeviceDesc array.
    if (desc == nullptr) {
        return;
    }
    // Currently NNRT only ships the rk3568 driver with a single CPU device,
    // so no per-device filtering is needed here.
    std::cout << "found " << num << " nnrt devices" << std::endl;
    // Capture id/name/type before the descriptor array is destroyed below.
    auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc);
    auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
    auto type = OH_AI_GetTypeFromNNRTDeviceDesc(desc);
    std::cout << "NNRT device: id = " << id << ", name: " << name << ", type:" << type << std::endl;

    auto nnrt_device_info = OH_AI_CreateNNRTDeviceInfoByType(type);
    OH_AI_DestroyAllNNRTDeviceDescs(&desc);
    ASSERT_NE(nnrt_device_info, nullptr);

    OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(nnrt_device_info);
    printf("==========device_type:%d\n", device_type);
    ASSERT_EQ(device_type, OH_AI_DEVICETYPE_NNRT);
    // The by-type factory should have picked the same (first) device.
    ASSERT_EQ(OH_AI_DeviceInfoGetDeviceId(nnrt_device_info), id);

    OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
}
194 
// Adds an NNRT device info to `context`, creating it from the first device's
// *name* via OH_AI_CreateNNRTDeviceInfoByName, then verifies type and id.
void AddContextDeviceNNRTByName(OH_AI_ContextHandle context) {
    size_t num = 0;
    auto desc = OH_AI_GetAllNNRTDeviceDescs(&num);
    // `desc` is the head of the returned NNRTDeviceDesc array.
    if (desc == nullptr) {
        return;
    }
    // Currently NNRT only ships the rk3568 driver with a single CPU device,
    // so no per-device filtering is needed here.
    std::cout << "found " << num << " nnrt devices" << std::endl;
    // Capture id/name/type before the descriptor array is destroyed below;
    // `name` is used by the factory call while the array is still alive.
    auto id = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc);
    auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
    auto type = OH_AI_GetTypeFromNNRTDeviceDesc(desc);
    std::cout << "NNRT device: id = " << id << ", name: " << name << ", type:" << type << std::endl;

    auto nnrt_device_info = OH_AI_CreateNNRTDeviceInfoByName(name);
    OH_AI_DestroyAllNNRTDeviceDescs(&desc);
    ASSERT_NE(nnrt_device_info, nullptr);

    OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(nnrt_device_info);
    printf("==========device_type:%d\n", device_type);
    ASSERT_EQ(device_type, OH_AI_DEVICETYPE_NNRT);
    // The by-name factory should have picked the same (first) device.
    ASSERT_EQ(OH_AI_DeviceInfoGetDeviceId(nnrt_device_info), id);

    OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
}
221 
222 // fill data to inputs tensor
FillInputsData(OH_AI_TensorHandleArray inputs,string model_name,bool is_transpose)223 void FillInputsData(OH_AI_TensorHandleArray inputs, string model_name, bool is_transpose) {
224     for (size_t i = 0; i < inputs.handle_num; ++i) {
225         printf("==========ReadFile==========\n");
226         size_t size1;
227         size_t *ptr_size1 = &size1;
228         string input_data_path = "/data/test/" + model_name + "_" + std::to_string(i) + ".input";
229         const char *imagePath = input_data_path.c_str();
230         char *imageBuf = ReadFile(imagePath, ptr_size1);
231         ASSERT_NE(imageBuf, nullptr);
232         OH_AI_TensorHandle tensor = inputs.handle_list[i];
233         int64_t element_num = OH_AI_TensorGetElementNum(tensor);
234         printf("Tensor name: %s. \n", OH_AI_TensorGetName(tensor));
235         float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(inputs.handle_list[i]));
236         ASSERT_NE(input_data, nullptr);
237         if (is_transpose) {
238             printf("==========Transpose==========\n");
239             size_t shape_num;
240             const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
241             auto imageBuf_nhwc = new char[size1];
242             PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]);
243             memcpy_s(input_data, size1, imageBuf_nhwc, size1);
244             delete[] imageBuf_nhwc;
245         } else {
246             memcpy_s(input_data, size1, imageBuf, size1);
247         }
248         printf("input data after filling is: ");
249         for (int j = 0; j < element_num && j <= 20; ++j) {
250             printf("%f ", input_data[j]);
251         }
252         printf("\n");
253         delete[] imageBuf;
254     }
255 }
256 
257 // compare result after predict
CompareResult(OH_AI_TensorHandleArray outputs,string model_name,float atol=0.01,float rtol=0.01,bool isquant=false)258 void CompareResult(OH_AI_TensorHandleArray outputs, string model_name, float atol = 0.01, float rtol = 0.01, bool isquant = false) {
259     printf("==========GetOutput==========\n");
260     for (size_t i = 0; i < outputs.handle_num; ++i) {
261         OH_AI_TensorHandle tensor = outputs.handle_list[i];
262         int64_t element_num = OH_AI_TensorGetElementNum(tensor);
263         printf("Tensor name: %s .\n", OH_AI_TensorGetName(tensor));
264         float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
265         printf("output data is:");
266         for (int j = 0; j < element_num && j <= 20; ++j) {
267             printf("%f ", output_data[j]);
268         }
269         printf("\n");
270         printf("==========compFp32WithTData==========\n");
271         string output_file = "/data/test/" + model_name + std::to_string(i) + ".output";
272         bool result = compFp32WithTData(output_data, output_file, atol, rtol, isquant);
273         EXPECT_EQ(result, true);
274     }
275 }
276 
// model build and predict
// Builds `model` from "/data/test/<model_name>.ms" (from an in-memory buffer
// when build_by_graph is true, otherwise straight from file), optionally
// resizes the inputs to `shape_infos` (shape_num == 0 means "no resize"),
// fills the inputs from the matching *.input files, runs one prediction
// (with the print callbacks when is_callback is true), compares the outputs
// against the *.output files, and finally destroys the model.
void ModelPredict(OH_AI_ModelHandle model, OH_AI_ContextHandle context, string model_name,
            OH_AI_ShapeInfo shape_infos, bool build_by_graph, bool is_transpose, bool is_callback) {
    string model_path = "/data/test/" + model_name + ".ms";
    const char *graphPath = model_path.c_str();
    OH_AI_Status ret = OH_AI_STATUS_SUCCESS;
    if (build_by_graph) {
        printf("==========Build model by graphBuf==========\n");
        size_t size;
        size_t *ptr_size = &size;
        char *graphBuf = ReadFile(graphPath, ptr_size);
        ASSERT_NE(graphBuf, nullptr);
        ret = OH_AI_ModelBuild(model, graphBuf, size, OH_AI_MODELTYPE_MINDIR, context);
        delete[] graphBuf;
    } else {
        printf("==========Build model==========\n");
        ret = OH_AI_ModelBuildFromFile(model, graphPath, OH_AI_MODELTYPE_MINDIR, context);
    }
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    if (shape_infos.shape_num != 0) {
        printf("==========Resizes==========\n");
        // The single shape_info is applied to every input tensor.
        OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
        printf("==========Resizes return code:%d\n", resize_ret);
        ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
    }

    FillInputsData(inputs, model_name, is_transpose);
    OH_AI_TensorHandleArray outputs;
    OH_AI_Status predict_ret = OH_AI_STATUS_SUCCESS;
    if (is_callback) {
        printf("==========Model Predict Callback==========\n");
        OH_AI_KernelCallBack before_call_back = PrintBeforeCallback;
        OH_AI_KernelCallBack after_call_back = PrintAfterCallback;
        predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, before_call_back, after_call_back);
    }else {
        printf("==========Model Predict==========\n");
        predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    }
    printf("==========Model Predict End==========\n");
    ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
    printf("=========CompareResult===========\n");
    CompareResult(outputs, model_name);
    printf("=========OH_AI_ModelDestroy===========\n");
    // Destroys the model built above; `model` is invalid after this call.
    OH_AI_ModelDestroy(&model);
    printf("=========OH_AI_ModelDestroy End===========\n");
}
327 
// model build and run one training step
// Builds `model` as a *train* model from "/data/test/<model_name>.ms" (from a
// memory buffer when build_by_graph is true), optionally resizes the inputs
// (shape_num == 0 means "no resize"), fills the inputs from the matching
// *.input files, switches to train mode, and executes a single OH_AI_RunStep
// (with print callbacks when is_callback is true).
// NOTE(review): `train_cfg` and `model` are not destroyed here — confirm
// whether the build call takes ownership of the config before adding cleanup.
void ModelTrain(OH_AI_ModelHandle model, OH_AI_ContextHandle context, string model_name,
            OH_AI_ShapeInfo shape_infos, bool build_by_graph, bool is_transpose, bool is_callback) {
    string model_path = "/data/test/" + model_name + ".ms";
    const char *graphPath = model_path.c_str();
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    OH_AI_Status ret = OH_AI_STATUS_SUCCESS;
    if (build_by_graph) {
        printf("==========Build model by graphBuf==========\n");
        size_t size;
        size_t *ptr_size = &size;
        char *graphBuf = ReadFile(graphPath, ptr_size);
        ASSERT_NE(graphBuf, nullptr);
        ret = OH_AI_TrainModelBuild(model, graphBuf, size, OH_AI_MODELTYPE_MINDIR, context, train_cfg);
        delete[] graphBuf;
    } else {
        printf("==========Build model==========\n");
        ret = OH_AI_TrainModelBuildFromFile(model, graphPath, OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    }
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    if (shape_infos.shape_num != 0) {
        printf("==========Resizes==========\n");
        // The single shape_info is applied to every input tensor.
        OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
        printf("==========Resizes return code:%d\n", resize_ret);
        ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
    }
    FillInputsData(inputs, model_name, is_transpose);
    // Training mode must be enabled before RunStep.
    ret = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    if (is_callback) {
        printf("==========Model RunStep Callback==========\n");
        OH_AI_KernelCallBack before_call_back = PrintBeforeCallback;
        OH_AI_KernelCallBack after_call_back = PrintAfterCallback;
        ret = OH_AI_RunStep(model, before_call_back, after_call_back);
    }else {
        printf("==========Model RunStep==========\n");
        ret = OH_AI_RunStep(model, nullptr, nullptr);
    }
    printf("==========Model RunStep End==========\n");
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
}
// Converts a vector of strings to a heap-allocated array of NUL-terminated
// C strings. The caller owns the result: free() each element, then the array.
// Returns nullptr on allocation failure (fix: malloc results were previously
// unchecked; strcpy replaced by a length-bounded memcpy).
char **TransStrVectorToCharArrays(const std::vector<std::string> &s) {
  char **char_arr = static_cast<char **>(malloc(s.size() * sizeof(char *)));
  if (char_arr == nullptr) {
    return nullptr;
  }
  for (size_t i = 0; i < s.size(); i++) {
    size_t len = s[i].size();
    char_arr[i] = static_cast<char *>(malloc(len + 1));
    if (char_arr[i] == nullptr) {
      // Roll back everything allocated so far so nothing leaks.
      for (size_t j = 0; j < i; j++) {
        free(char_arr[j]);
      }
      free(char_arr);
      return nullptr;
    }
    memcpy(char_arr[i], s[i].c_str(), len + 1);  // includes the trailing '\0'
  }
  return char_arr;
}
// Converts an array of `num` NUL-terminated C strings into a vector of
// std::string copies; the input array is not modified or freed.
std::vector<std::string> TransCharArraysToStrVector(char **c, const size_t &num) {
  std::vector<std::string> result;
  result.reserve(num);
  for (size_t idx = 0; idx < num; ++idx) {
    result.emplace_back(c[idx]);
  }
  return result;
}
388 
PrintTrainLossName(OH_AI_TrainCfgHandle trainCfg)389 void PrintTrainLossName(OH_AI_TrainCfgHandle trainCfg) {
390     size_t num = 0;
391     char **lossName = OH_AI_TrainCfgGetLossName(trainCfg, &num);
392     std::vector<std::string> trainCfgLossName = TransCharArraysToStrVector(lossName, num);
393     for (auto ele : trainCfgLossName) {
394         std::cout << "loss_name:" << ele << std::endl;
395     }
396     for (size_t i = 0; i < num; i++) {
397         free(lossName[i]);
398     }
399 }
400 
// Normal scenario: update weights, then run one training step successfully.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_UpdateWeights_0001, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // Fills `data` with size / sizeof(float) uniform floats in [0, 1).
    // The default-constructed mt19937 engine makes the data deterministic.
    auto GenRandomData = [](size_t size, void *data) {
      auto generator = std::uniform_real_distribution<float>(0.0f, 1.0f);
      std::mt19937 random_engine_;
      size_t elements_num = size / sizeof(float);
      (void)std::generate_n(static_cast<float *>(data), elements_num,
                            [&]() { return static_cast<float>(generator(random_engine_)); });
    };
    std::vector<OH_AI_TensorHandle> vec_inputs;
    constexpr size_t create_shape_num = 1;
    int64_t create_shape[create_shape_num] = {10};
    // Replacement weight tensor matching "fc3.bias": float32, shape [10].
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("fc3.bias", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, create_shape_num, nullptr, 0);
    ASSERT_NE(tensor, nullptr);
    GenRandomData(OH_AI_TensorGetDataSize(tensor), OH_AI_TensorGetMutableData(tensor));
    vec_inputs.push_back(tensor);
    OH_AI_TensorHandleArray update_weights = {1, vec_inputs.data()};
    status = OH_AI_ModelUpdateWeights(model, update_weights);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // NOTE(review): context/model/train_cfg/tensor are not destroyed here —
    // confirm whether OH_AI_ModelUpdateWeights takes tensor ownership before
    // adding cleanup.
}
// Normal scenario: update weights, export the training graph, then read the
// weights back.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_UpdateWeights_0002, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // Log the current value of fc3.bias before the update for manual comparison.
    OH_AI_TensorHandleArray get_update_weights = OH_AI_ModelGetWeights(model);
    for (size_t i = 0; i < get_update_weights.handle_num; ++i) {
        OH_AI_TensorHandle weights_tensor = get_update_weights.handle_list[i];
        if (strcmp(OH_AI_TensorGetName(weights_tensor), "fc3.bias") == 0){
            float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(weights_tensor));
            std::cout << "fc3.bias:" << input_data[0] << std::endl;
        }
    }
    // Fills `data` with size / sizeof(float) uniform floats in [0, 1);
    // the default-seeded engine keeps the data deterministic across runs.
    auto GenRandomData = [](size_t size, void *data) {
      auto generator = std::uniform_real_distribution<float>(0.0f, 1.0f);
      std::mt19937 random_engine_;
      size_t elements_num = size / sizeof(float);
      (void)std::generate_n(static_cast<float *>(data), elements_num,
                            [&]() { return static_cast<float>(generator(random_engine_)); });
    };
    std::vector<OH_AI_TensorHandle> vec_inputs;
    constexpr size_t create_shape_num = 1;
    int64_t create_shape[create_shape_num] = {10};
    // Replacement weight tensor matching "fc3.bias": float32, shape [10].
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("fc3.bias", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, create_shape_num, nullptr, 0);
    ASSERT_NE(tensor, nullptr);
    GenRandomData(OH_AI_TensorGetDataSize(tensor), OH_AI_TensorGetMutableData(tensor));
    vec_inputs.push_back(tensor);
    OH_AI_TensorHandleArray update_weights = {1, vec_inputs.data()};
    status = OH_AI_ModelUpdateWeights(model, update_weights);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ExportModel==========\n");
    status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // Log fc3.bias again after the training step + export to observe the
    // updated value (manual check; no assertion on the actual number).
    OH_AI_TensorHandleArray export_update_weights = OH_AI_ModelGetWeights(model);
    for (size_t i = 0; i < export_update_weights.handle_num; ++i) {
        OH_AI_TensorHandle weights_tensor = export_update_weights.handle_list[i];
        if (strcmp(OH_AI_TensorGetName(weights_tensor), "fc3.bias") == 0){
            float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(weights_tensor));
            std::cout << "fc3.bias:" << input_data[0] << std::endl;
        }
    }
}
// Error scenario: updating weights with a tensor name that does not exist in
// the graph must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_UpdateWeights_0003, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    std::vector<OH_AI_TensorHandle> vec_inputs;
    constexpr size_t create_shape_num = 1;
    int64_t create_shape[create_shape_num] = {10};
    // "aaaaa" matches no weight in lenet_train, so the update must be rejected.
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("aaaaa", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, create_shape_num, nullptr, 0);
    ASSERT_NE(tensor, nullptr);
    vec_inputs.push_back(tensor);
    OH_AI_TensorHandleArray update_weights = {1, vec_inputs.data()};
    status = OH_AI_ModelUpdateWeights(model, update_weights);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
// Error scenario: updating weights with the wrong tensor data type must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_UpdateWeights_0004, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    std::vector<OH_AI_TensorHandle> vec_inputs;
    constexpr size_t create_shape_num = 1;
    int64_t create_shape[create_shape_num] = {10};
    // fc3.bias is float32 in the graph; a FLOAT16 tensor must be rejected.
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("fc3.bias", OH_AI_DATATYPE_NUMBERTYPE_FLOAT16, create_shape, create_shape_num, nullptr, 0);
    ASSERT_NE(tensor, nullptr);
    vec_inputs.push_back(tensor);
    OH_AI_TensorHandleArray update_weights = {1, vec_inputs.data()};
    status = OH_AI_ModelUpdateWeights(model, update_weights);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
// Normal scenario: set the learning rate to 0.01 and run one training step.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_SetLearningRate_0001, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    auto learing_rate = OH_AI_ModelGetLearningRate(model);
    std::cout << "learing_rate:" << learing_rate << std::endl;
    status = OH_AI_ModelSetLearningRate(model, 0.01f);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    learing_rate = OH_AI_ModelGetLearningRate(model);
    std::cout << "get_learing_rate:" << learing_rate << std::endl;
    // Exact float compare is fine here: the same literal round-trips
    // through the setter/getter unchanged.
    ASSERT_EQ(learing_rate, 0.01f);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
}
// Normal scenario: set the learning rate to an extreme value (1000.0) and run
// one training step; only status codes are asserted, not loss quality.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_SetLearningRate_0002, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    auto learing_rate = OH_AI_ModelGetLearningRate(model);
    std::cout << "learing_rate:" << learing_rate << std::endl;
    status = OH_AI_ModelSetLearningRate(model, 1000.0f);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    learing_rate = OH_AI_ModelGetLearningRate(model);
    std::cout << "get_learing_rate:" << learing_rate << std::endl;
    // Exact float compare is fine: 1000.0f round-trips unchanged.
    ASSERT_EQ(learing_rate, 1000.0f);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
}
// Normal case: set virtual batch size to 2, train one step, export, then predict.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_SetupVirtualBatch_0001, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // Virtual batch = 2. NOTE(review): -1.0f for lr/momentum presumably keeps
    // the model's current values — confirm against the C API documentation.
    status = OH_AI_ModelSetupVirtualBatch(model, 2, -1.0f, -1.0f);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ExportModel==========\n");
    // Export the trained graph as an inference-only model for the predict phase.
    status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelPredict==========\n");
    ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
}
// Normal case: set the training optimization level, train, export and predict.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_SetOptimizationLevel_0001, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    // Log the default level, switch it to O2, then log the new value.
    // NOTE(review): the set level is only printed, never asserted — consider
    // ASSERT_EQ(set_opt_level, OH_AI_KO2).
    auto opt_level = OH_AI_TrainCfgGetOptimizationLevel(train_cfg);
    std::cout << "opt_level:" << opt_level << std::endl;
    OH_AI_TrainCfgSetOptimizationLevel(train_cfg, OH_AI_KO2);
    auto set_opt_level = OH_AI_TrainCfgGetOptimizationLevel(train_cfg);
    std::cout << "set_opt_level:" << set_opt_level << std::endl;
    printf("==========OH_AI_TrainModelBuildFromFile==========\n");
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ExportModel==========\n");
    status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelPredict==========\n");
    ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
}
697 // 正常场景:创建TrainCfg对象并销毁
698 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_TrainCfg_0001, Function | MediumTest | Level1) {
699     printf("==========OH_AI_ContextCreate==========\n");
700     OH_AI_ContextHandle context = OH_AI_ContextCreate();
701     ASSERT_NE(context, nullptr);
702     AddContextDeviceCPU(context);
703     printf("==========OH_AI_ModelCreate==========\n");
704     OH_AI_ModelHandle model = OH_AI_ModelCreate();
705     ASSERT_NE(model, nullptr);
706     printf("==========OH_AI_TrainCfgCreate==========\n");
707     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
708     ASSERT_NE(train_cfg, nullptr);
709     OH_AI_TrainCfgDestroy(&train_cfg);
710     ASSERT_EQ(train_cfg, nullptr);
711 }
// Normal case: set loss-function names that exist in the graph, then train,
// export and predict.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_TrainCfg_0002, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    PrintTrainLossName(train_cfg);

    // Replace the default loss names, then print the config again to verify.
    // NOTE(review): the char** returned by TransStrVectorToCharArrays is never
    // freed here — confirm ownership and free it if the helper allocates.
    std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn"};
    char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
    OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
    PrintTrainLossName(train_cfg);

    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "lenet_train", false);
    status = OH_AI_ModelSetTrainMode(model, true);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========Model RunStep==========\n");
    status = OH_AI_RunStep(model, nullptr, nullptr);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ExportModel==========\n");
    status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelPredict==========\n");
    ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
}
754 // 正常场景:设置不存在的损失函数名
755 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_TrainCfg_0003, Function | MediumTest | Level1) {
756     printf("==========OH_AI_ContextCreate==========\n");
757     OH_AI_ContextHandle context = OH_AI_ContextCreate();
758     ASSERT_NE(context, nullptr);
759     AddContextDeviceCPU(context);
760     printf("==========OH_AI_ModelCreate==========\n");
761     OH_AI_ModelHandle model = OH_AI_ModelCreate();
762     ASSERT_NE(model, nullptr);
763     printf("==========OH_AI_TrainCfgCreate==========\n");
764     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
765     ASSERT_NE(train_cfg, nullptr);
766     PrintTrainLossName(train_cfg);
767 
768     std::vector<std::string> set_train_cfg_loss_name = {"aaa", "bbb"};
769     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
770     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
771     PrintTrainLossName(train_cfg);
772 
773     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
774     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
775 }
// Normal case: pass more loss names than the declared count (num).
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_TrainCfg_0004, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    PrintTrainLossName(train_cfg);

    // Two names are supplied but num = 1, so only the first ("loss_fct")
    // should be consumed by the API.
    std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn"};
    char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
    OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), 1);
    PrintTrainLossName(train_cfg);

    // Building should still succeed with the truncated loss-name list.
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
}
798 // 正常场景:通过buffer加载模型,执行1轮训练并对比精度
799 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ModelBuild_0001, Function | MediumTest | Level1) {
800     printf("==========OH_AI_ContextCreate==========\n");
801     OH_AI_ContextHandle context = OH_AI_ContextCreate();
802     ASSERT_NE(context, nullptr);
803     AddContextDeviceCPU(context);
804     printf("==========OH_AI_ModelCreate==========\n");
805     OH_AI_ModelHandle model = OH_AI_ModelCreate();
806     ASSERT_NE(model, nullptr);
807     printf("==========OH_AI_RunStep==========\n");
808     ModelTrain(model, context, "lenet_train", {}, true, false, false);
809     printf("==========OH_AI_ExportModel==========\n");
810     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
811     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
812     printf("==========OH_AI_ModelSetTrainMode==========\n");
813     status = OH_AI_ModelSetTrainMode(model, false);
814     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
815     printf("==========OH_AI_ModelCreate2==========\n");
816     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
817     ASSERT_NE(model2, nullptr);
818     printf("==========ModelPredict==========\n");
819     ModelPredict(model2, context, "lenet_train_infer", {}, true, false, false);
820 }
821 // 异常场景:加载模型buffer为空
822 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ModelBuild_0002, Function | MediumTest | Level1) {
823     printf("==========OH_AI_ContextCreate==========\n");
824     OH_AI_ContextHandle context = OH_AI_ContextCreate();
825     ASSERT_NE(context, nullptr);
826     AddContextDeviceCPU(context);
827     printf("==========OH_AI_ModelCreate==========\n");
828     OH_AI_ModelHandle model = OH_AI_ModelCreate();
829     ASSERT_NE(model, nullptr);
830     printf("==========OH_AI_TrainCfgCreate==========\n");
831     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
832     ASSERT_NE(train_cfg, nullptr);
833     printf("==========Build model by graphBuf==========\n");
834     auto status = OH_AI_TrainModelBuild(model, nullptr, 0, OH_AI_MODELTYPE_MINDIR, context, train_cfg);
835     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
836 }
// Error case: the model file path does not exist — build must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ModelBuild_0003, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    // "/data/not_exist/..." is intentionally a missing path.
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/not_exist/lenet_train.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
852 // 异常场景:加载模型文件路径为空
853 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ModelBuild_0004, Function | MediumTest | Level1) {
854     printf("==========OH_AI_ContextCreate==========\n");
855     OH_AI_ContextHandle context = OH_AI_ContextCreate();
856     ASSERT_NE(context, nullptr);
857     AddContextDeviceCPU(context);
858     printf("==========OH_AI_ModelCreate==========\n");
859     OH_AI_ModelHandle model = OH_AI_ModelCreate();
860     ASSERT_NE(model, nullptr);
861     printf("==========OH_AI_TrainCfgCreate==========\n");
862     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
863     ASSERT_NE(train_cfg, nullptr);
864     auto status = OH_AI_TrainModelBuildFromFile(model, "", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
865     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
866 }
// Error case: the path points to a file that is not a valid model — build must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ModelBuild_0005, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_TrainCfgCreate==========\n");
    OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
    ASSERT_NE(train_cfg, nullptr);
    // The .input file is test data, not a MindIR model, so parsing should fail.
    auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/lenet_train_0.input", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
// Normal case: train the model, export the inference graph, then compare
// prediction accuracy against expected outputs.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0001, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    // export_inference_only = true: only the inference graph is written out.
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelPredict==========\n");
    ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
}
905 // 正常场景:quantization_type为OH_AI_WEIGHT_QUANT
906 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0002, Function | MediumTest | Level1) {
907     printf("==========OH_AI_ContextCreate==========\n");
908     OH_AI_ContextHandle context = OH_AI_ContextCreate();
909     ASSERT_NE(context, nullptr);
910     AddContextDeviceCPU(context);
911     printf("==========OH_AI_ModelCreate==========\n");
912     OH_AI_ModelHandle model = OH_AI_ModelCreate();
913     ASSERT_NE(model, nullptr);
914     printf("==========OH_AI_RunStep==========\n");
915     ModelTrain(model, context, "lenet_train", {}, false, false, false);
916     printf("==========OH_AI_ExportModel==========\n");
917     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_WEIGHT_QUANT, true, nullptr, 0);
918     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
919     printf("==========OH_AI_ModelSetTrainMode==========\n");
920     status = OH_AI_ModelSetTrainMode(model, false);
921     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
922     printf("==========OH_AI_ModelCreate2==========\n");
923     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
924     ASSERT_NE(model2, nullptr);
925     printf("==========ModelPredict==========\n");
926     ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
927 }
// Normal case: export with quantization_type = OH_AI_FULL_QUANT.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0003, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    // Full quantization is expected to succeed for this model.
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_FULL_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelPredict==========\n");
    ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
}
951 // 正常场景:quantization_type为OH_AI_UNKNOWN_QUANT_TYPE
952 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0004, Function | MediumTest | Level1) {
953     printf("==========OH_AI_ContextCreate==========\n");
954     OH_AI_ContextHandle context = OH_AI_ContextCreate();
955     ASSERT_NE(context, nullptr);
956     AddContextDeviceCPU(context);
957     printf("==========OH_AI_ModelCreate==========\n");
958     OH_AI_ModelHandle model = OH_AI_ModelCreate();
959     ASSERT_NE(model, nullptr);
960     printf("==========OH_AI_RunStep==========\n");
961     ModelTrain(model, context, "lenet_train", {}, false, false, false);
962     printf("==========OH_AI_ExportModel==========\n");
963     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_UNKNOWN_QUANT_TYPE, true, nullptr, 0);
964     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
965     printf("==========OH_AI_ModelSetTrainMode==========\n");
966     status = OH_AI_ModelSetTrainMode(model, false);
967     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
968     printf("==========OH_AI_ModelCreate2==========\n");
969     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
970     ASSERT_NE(model2, nullptr);
971     printf("==========ModelPredict==========\n");
972     ModelPredict(model2, context, "lenet_train_infer", {}, false, false, false);
973 }
// Normal case: export_inference_only = false — the exported file keeps the
// training graph and can be trained again.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0005, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_train.ms", OH_AI_NO_QUANT, false, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelTrain==========\n");
    // Re-train from the exported training graph to prove it is still trainable.
    ModelTrain(model2, context, "lenet_train_train", {}, false, false, false);
}
// Normal case: export_inference_only = false with an explicit output_tensor_name.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0006, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    // Export only the subgraph ending at this (existing) loss output tensor.
    // NOTE(review): output_tensor_name from TransStrVectorToCharArrays is not
    // freed — confirm ownership of the helper's allocation.
    const std::vector<std::string> output_name = {"Default/network-WithLossCell/_loss_fn-L1Loss/ReduceMean-op127"};
    auto output_tensor_name = TransStrVectorToCharArrays(output_name);
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_train.ms", OH_AI_NO_QUANT, false, output_tensor_name, 1);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelCreate2==========\n");
    OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
    ASSERT_NE(model2, nullptr);
    printf("==========ModelTrain==========\n");
    ModelTrain(model2, context, "lenet_train_train", {}, false, false, false);
}
1016 // 异常场景:OH_AI_MODELTYPE_INVALID
1017 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0007, Function | MediumTest | Level1) {
1018     printf("==========OH_AI_ContextCreate==========\n");
1019     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1020     ASSERT_NE(context, nullptr);
1021     AddContextDeviceCPU(context);
1022     printf("==========OH_AI_ModelCreate==========\n");
1023     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1024     ASSERT_NE(model, nullptr);
1025     printf("==========OH_AI_RunStep==========\n");
1026     ModelTrain(model, context, "lenet_train", {}, false, false, false);
1027     printf("==========OH_AI_ExportModel==========\n");
1028     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_INVALID, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1029     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1030 }
// Error case: export with an output_tensor_name not present in the graph — must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0008, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    // "aaa" does not name any tensor in the model.
    const std::vector<std::string> output_name = {"aaa"};
    auto output_tensor_name = TransStrVectorToCharArrays(output_name);
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, output_tensor_name, 1);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
// Normal case: the output_tensor_name array holds one entry but num = 0;
// the export is expected to succeed (the name list is effectively ignored).
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0009, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    const std::vector<std::string> output_name = {"Default/network-WithLossCell/_loss_fn-L1Loss/ReduceMean-op127"};
    auto output_tensor_name = TransStrVectorToCharArrays(output_name);
    // num = 0 while one name is supplied — deliberate mismatch under test.
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, output_tensor_name, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
}
1065 // 异常场景:model_file文件路径不存在
1066 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0010, Function | MediumTest | Level1) {
1067     printf("==========OH_AI_ContextCreate==========\n");
1068     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1069     ASSERT_NE(context, nullptr);
1070     AddContextDeviceCPU(context);
1071     printf("==========OH_AI_ModelCreate==========\n");
1072     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1073     ASSERT_NE(model, nullptr);
1074     printf("==========OH_AI_RunStep==========\n");
1075     ModelTrain(model, context, "lenet_train", {}, false, false, false);
1076     printf("==========OH_AI_ExportModel==========\n");
1077     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/not_exsit/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1078     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1079 }
1080 // 异常场景:model_file路径为空
1081 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0011, Function | MediumTest | Level1) {
1082     printf("==========OH_AI_ContextCreate==========\n");
1083     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1084     ASSERT_NE(context, nullptr);
1085     AddContextDeviceCPU(context);
1086     printf("==========OH_AI_ModelCreate==========\n");
1087     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1088     ASSERT_NE(model, nullptr);
1089     printf("==========OH_AI_RunStep==========\n");
1090     ModelTrain(model, context, "lenet_train", {}, false, false, false);
1091     printf("==========OH_AI_ExportModel==========\n");
1092     auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "", OH_AI_NO_QUANT, true, nullptr, 0);
1093     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1094 }
// Error case: the export file path is a directory — must fail.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0012, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    // "/data/test/" is a directory, not a writable file path.
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
}
// Normal case: OH_AI_ModelGetTrainMode reflects the value set via
// OH_AI_ModelSetTrainMode; the model is destroyed at the end.
HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0013, Function | MediumTest | Level1) {
    printf("==========OH_AI_ContextCreate==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========OH_AI_ModelCreate==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========OH_AI_RunStep==========\n");
    ModelTrain(model, context, "lenet_train", {}, false, false, false);
    printf("==========OH_AI_ExportModel==========\n");
    auto status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/lenet_train_infer.ms", OH_AI_NO_QUANT, true, nullptr, 0);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    printf("==========OH_AI_ModelSetTrainMode==========\n");
    status = OH_AI_ModelSetTrainMode(model, false);
    ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
    // The getter must report the train mode just set above.
    auto train_mode = OH_AI_ModelGetTrainMode(model);
    ASSERT_EQ(train_mode, false);
    printf("=========OH_AI_ModelDestroy===========\n");
    OH_AI_ModelDestroy(&model);
    printf("=========OH_AI_ModelDestroy End===========\n");
}
1133 // 正常场景:OH_AI_ExportModelBuffer
1134 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportModel_0014, Function | MediumTest | Level1) {
1135     printf("==========OH_AI_ContextCreate==========\n");
1136     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1137     ASSERT_NE(context, nullptr);
1138     AddContextDeviceCPU(context);
1139     printf("==========OH_AI_ModelCreate==========\n");
1140     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1141     ASSERT_NE(model, nullptr);
1142     printf("==========OH_AI_RunStep==========\n");
1143     ModelTrain(model, context, "lenet_train", {}, false, false, false);
1144     printf("==========OH_AI_ExportModel==========\n");
1145     char *modelData;
1146     size_t data_size;
1147     auto status = OH_AI_ExportModelBuffer(model, OH_AI_MODELTYPE_MINDIR, &modelData,
1148 	 &data_size, OH_AI_NO_QUANT, true, nullptr, 0);
1149     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1150     ASSERT_NE(modelData, nullptr);
1151     ASSERT_NE(data_size, 0);
1152     printf("==========OH_AI_ModelCreate2==========\n");
1153     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
1154     ASSERT_NE(model2, nullptr);
1155 
1156     OH_AI_ContextHandle context2 = OH_AI_ContextCreate();
1157     ASSERT_NE(context2, nullptr);
1158     AddContextDeviceCPU(context2);
1159     printf("==========ModelPredict==========\n");
1160     auto ret = OH_AI_ModelBuild(model2, modelData, data_size, OH_AI_MODELTYPE_MINDIR, context2);
1161     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
1162     printf("==========GetInputs==========\n");
1163     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model2);
1164     ASSERT_NE(inputs.handle_list, nullptr);
1165     FillInputsData(inputs, "lenet_train_infer", false);
1166     printf("==========Model Predict==========\n");
1167     OH_AI_TensorHandleArray outputs;
1168     OH_AI_Status predict_ret = OH_AI_ModelPredict(model2, inputs, &outputs, nullptr, nullptr);
1169     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
1170     printf("=========CompareResult===========\n");
1171     CompareResult(outputs, "lenet_train_infer");
1172     printf("=========model01 OH_AI_ModelDestroy===========\n");
1173     OH_AI_ModelDestroy(&model);
1174     printf("=========model01 OH_AI_ModelDestroy End===========\n");
1175     printf("=========model02 OH_AI_ModelDestroy===========\n");
1176     OH_AI_ModelDestroy(&model2);
1177     printf("=========model02 OH_AI_ModelDestroy End===========\n");
1178 }
1179 // 正常场景:训练model导出micro权重
1180 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0001, Function | MediumTest | Level1) {
1181     printf("==========OH_AI_ContextCreate==========\n");
1182     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1183     ASSERT_NE(context, nullptr);
1184     AddContextDeviceCPU(context);
1185     printf("==========OH_AI_ModelCreate==========\n");
1186     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1187     ASSERT_NE(model, nullptr);
1188     printf("==========OH_AI_TrainCfgCreate==========\n");
1189     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1190     ASSERT_NE(train_cfg, nullptr);
1191     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1192     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1193     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1194     printf("==========OH_AI_ExportModel==========\n");
1195     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1196     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1197     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net1.bin", true, true, nullptr, 0);
1198     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1199     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net1_fp32.bin", true, false, nullptr, 0);
1200     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1201 }
1202 // 正常场景:训练model更新并导出micro权重
1203 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0002, Function | MediumTest | Level1) {
1204     printf("==========OH_AI_ContextCreate==========\n");
1205     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1206     ASSERT_NE(context, nullptr);
1207     AddContextDeviceCPU(context);
1208     printf("==========OH_AI_ModelCreate==========\n");
1209     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1210     ASSERT_NE(model, nullptr);
1211     printf("==========OH_AI_TrainCfgCreate==========\n");
1212     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1213     ASSERT_NE(train_cfg, nullptr);
1214     std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn", "SigmoidCrossEntropy", "BinaryCrossEntropy"};
1215     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
1216     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
1217     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1218     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1219     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1220     printf("==========OH_AI_ExportModel==========\n");
1221     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1222     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1223     const std::vector<std::string> changeble_weights_name = {"app_usage_statistic_30_cell.embedding.embedding_table",
1224                                                  "moment1.app_usage_statistic_30_cell.embedding.embedding_table",
1225                                                  "moment2.app_usage_statistic_30_cell.embedding.embedding_table",
1226                                                  "data-57"};
1227     char **set_changeble_weights_name = TransStrVectorToCharArrays(changeble_weights_name);
1228     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net1.bin", true, true, set_changeble_weights_name, changeble_weights_name.size());
1229     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1230     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net1_fp32.bin", true, false, set_changeble_weights_name, changeble_weights_name.size());
1231     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1232     printf("==================== update weight ==================\n");
__anondc0015860502(size_t size, void *data) 1233     auto GenRandomData = [](size_t size, void *data) {
1234       auto generator = std::uniform_real_distribution<float>(0.0f, 1.0f);
1235       std::mt19937 random_engine_;
1236       size_t elements_num = size / sizeof(float);
1237       (void)std::generate_n(static_cast<float *>(data), elements_num,
1238                             [&]() { return static_cast<float>(generator(random_engine_)); });
1239     };
1240     std::vector<OH_AI_TensorHandle> vec_inputs;
1241     constexpr size_t create_shape_num = 2;
1242     int64_t create_shape[create_shape_num] = {76, 8};
1243     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("app_usage_statistic_30_cell.embedding.embedding_table", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape, create_shape_num, nullptr, 0);
1244     GenRandomData(OH_AI_TensorGetDataSize(tensor), OH_AI_TensorGetMutableData(tensor));
1245     vec_inputs.push_back(tensor);
1246     constexpr size_t create_shape_num2 = 2;
1247     int64_t create_shape2[create_shape_num2] = {76, 8};
1248     OH_AI_TensorHandle tensor2 = OH_AI_TensorCreate("moment1.app_usage_statistic_30_cell.embedding.embedding_table", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape2, create_shape_num2, nullptr, 0);
1249     GenRandomData(OH_AI_TensorGetDataSize(tensor2), OH_AI_TensorGetMutableData(tensor2));
1250     vec_inputs.push_back(tensor2);
1251     constexpr size_t create_shape_num3 = 2;
1252     int64_t create_shape3[create_shape_num3] = {76, 8};
1253     OH_AI_TensorHandle tensor3 = OH_AI_TensorCreate("moment2.app_usage_statistic_30_cell.embedding.embedding_table", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape3, create_shape_num3, nullptr, 0);
1254     GenRandomData(OH_AI_TensorGetDataSize(tensor3), OH_AI_TensorGetMutableData(tensor3));
1255     vec_inputs.push_back(tensor3);
1256     OH_AI_TensorHandleArray update_weights = {3, vec_inputs.data()};
1257     status = OH_AI_ModelUpdateWeights(model, update_weights);
1258     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1259     printf("==================== train ===================\n");
1260     status = OH_AI_ModelSetTrainMode(model, true);
1261     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1262     status = OH_AI_RunStep(model, nullptr, nullptr);
1263     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1264     status = OH_AI_ModelSetTrainMode(model, false);
1265     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1266     status = OH_AI_RunStep(model, nullptr, nullptr);
1267     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1268     printf("==========OH_AI_ExportModel2==========\n");
1269     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model2.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1270     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1271     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net2.bin", true, true, set_changeble_weights_name, changeble_weights_name.size());
1272     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1273     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net2_fp32.bin", true, false, set_changeble_weights_name, changeble_weights_name.size());
1274     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1275 }
1276 // 异常场景:weight_file文件路径不存在
1277 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0003, Function | MediumTest | Level1) {
1278     printf("==========OH_AI_ContextCreate==========\n");
1279     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1280     ASSERT_NE(context, nullptr);
1281     AddContextDeviceCPU(context);
1282     printf("==========OH_AI_ModelCreate==========\n");
1283     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1284     ASSERT_NE(model, nullptr);
1285     printf("==========OH_AI_TrainCfgCreate==========\n");
1286     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1287     ASSERT_NE(train_cfg, nullptr);
1288     std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn", "SigmoidCrossEntropy", "BinaryCrossEntropy"};
1289     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
1290     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
1291     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1292     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1293     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1294     printf("==========OH_AI_ExportModel==========\n");
1295     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1296     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1297     const std::vector<std::string> changeble_weights_name = {"app_usage_statistic_30_cell.embedding.embedding_table",
1298                                                  "moment1.app_usage_statistic_30_cell.embedding.embedding_table",
1299                                                  "moment2.app_usage_statistic_30_cell.embedding.embedding_table",
1300                                                  "data-57"};
1301     char **set_changeble_weights_name = TransStrVectorToCharArrays(changeble_weights_name);
1302     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/not_exist/xiaoyi_train_codegen_net1.bin", true, true, set_changeble_weights_name, changeble_weights_name.size());
1303     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1304 }
1305 // 异常场景:weight_file路径为空
1306 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0004, Function | MediumTest | Level1) {
1307     printf("==========OH_AI_ContextCreate==========\n");
1308     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1309     ASSERT_NE(context, nullptr);
1310     AddContextDeviceCPU(context);
1311     printf("==========OH_AI_ModelCreate==========\n");
1312     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1313     ASSERT_NE(model, nullptr);
1314     printf("==========OH_AI_TrainCfgCreate==========\n");
1315     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1316     ASSERT_NE(train_cfg, nullptr);
1317     std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn", "SigmoidCrossEntropy", "BinaryCrossEntropy"};
1318     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
1319     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
1320     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1321     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1322     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1323     printf("==========OH_AI_ExportModel==========\n");
1324     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1325     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1326     const std::vector<std::string> changeble_weights_name = {"app_usage_statistic_30_cell.embedding.embedding_table",
1327                                                  "moment1.app_usage_statistic_30_cell.embedding.embedding_table",
1328                                                  "moment2.app_usage_statistic_30_cell.embedding.embedding_table",
1329                                                  "data-57"};
1330     char **set_changeble_weights_name = TransStrVectorToCharArrays(changeble_weights_name);
1331     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "", true, true, set_changeble_weights_name, changeble_weights_name.size());
1332     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1333 }
1334 // 异常场景:weight_file路径为文件夹
1335 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0005, Function | MediumTest | Level1) {
1336     printf("==========OH_AI_ContextCreate==========\n");
1337     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1338     ASSERT_NE(context, nullptr);
1339     AddContextDeviceCPU(context);
1340     printf("==========OH_AI_ModelCreate==========\n");
1341     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1342     ASSERT_NE(model, nullptr);
1343     printf("==========OH_AI_TrainCfgCreate==========\n");
1344     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1345     ASSERT_NE(train_cfg, nullptr);
1346     std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn", "SigmoidCrossEntropy", "BinaryCrossEntropy"};
1347     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
1348     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
1349     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1350     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1351     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1352     printf("==========OH_AI_ExportModel==========\n");
1353     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1354     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1355     const std::vector<std::string> changeble_weights_name = {"app_usage_statistic_30_cell.embedding.embedding_table",
1356                                                  "moment1.app_usage_statistic_30_cell.embedding.embedding_table",
1357                                                  "moment2.app_usage_statistic_30_cell.embedding.embedding_table",
1358                                                  "data-57"};
1359     char **set_changeble_weights_name = TransStrVectorToCharArrays(changeble_weights_name);
1360     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/", true, true, set_changeble_weights_name, changeble_weights_name.size());
1361     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1362 }
1363 // 异常场景:is_inference为false
1364 HWTEST(MSLiteTest, SUB_AI_MindSpore_Train_ExportWeights_0006, Function | MediumTest | Level1) {
1365     printf("==========OH_AI_ContextCreate==========\n");
1366     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1367     ASSERT_NE(context, nullptr);
1368     AddContextDeviceCPU(context);
1369     printf("==========OH_AI_ModelCreate==========\n");
1370     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1371     ASSERT_NE(model, nullptr);
1372     printf("==========OH_AI_TrainCfgCreate==========\n");
1373     OH_AI_TrainCfgHandle train_cfg = OH_AI_TrainCfgCreate();
1374     ASSERT_NE(train_cfg, nullptr);
1375     std::vector<std::string> set_train_cfg_loss_name = {"loss_fct", "_loss_fn", "SigmoidCrossEntropy", "BinaryCrossEntropy"};
1376     char **set_loss_name = TransStrVectorToCharArrays(set_train_cfg_loss_name);
1377     OH_AI_TrainCfgSetLossName(train_cfg, const_cast<const char **>(set_loss_name), set_train_cfg_loss_name.size());
1378     printf("==========OH_AI_TrainModelBuildFromFile==========\n");
1379     auto status = OH_AI_TrainModelBuildFromFile(model, "/data/test/xiaoyi_train_codegen.ms", OH_AI_MODELTYPE_MINDIR, context, train_cfg);
1380     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1381     printf("==========OH_AI_ExportModel==========\n");
1382     status = OH_AI_ExportModel(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_gru_model1.ms", OH_AI_NO_QUANT, true, nullptr, 0);
1383     ASSERT_EQ(status, OH_AI_STATUS_SUCCESS);
1384     const std::vector<std::string> changeble_weights_name = {"app_usage_statistic_30_cell.embedding.embedding_table",
1385                                                  "moment1.app_usage_statistic_30_cell.embedding.embedding_table",
1386                                                  "moment2.app_usage_statistic_30_cell.embedding.embedding_table",
1387                                                  "data-57"};
1388     char **set_changeble_weights_name = TransStrVectorToCharArrays(changeble_weights_name);
1389     status = OH_AI_ExportWeightsCollaborateWithMicro(model, OH_AI_MODELTYPE_MINDIR, "/data/test/xiaoyi_train_codegen_net1.bin", false, true, set_changeble_weights_name, changeble_weights_name.size());
1390     ASSERT_NE(status, OH_AI_STATUS_SUCCESS);
1391 }
1392 
1393 
1394 // predict on cpu
Predict_CPU()1395 void Predict_CPU() {
1396     printf("==========Init Context==========\n");
1397     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1398     ASSERT_NE(context, nullptr);
1399     AddContextDeviceCPU(context);
1400     printf("==========Create model==========\n");
1401     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1402     ASSERT_NE(model, nullptr);
1403     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1404 }
1405 
1406 // predict on cpu
Predict_NPU()1407 void Predict_NPU() {
1408     if (!IsNPU()) {
1409         printf("NNRt is not NPU, skip this test");
1410         return;
1411     }
1412     printf("==========Init Context==========\n");
1413     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1414     ASSERT_NE(context, nullptr);
1415     AddContextDeviceNNRT(context);
1416     printf("==========Create model==========\n");
1417     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1418     ASSERT_NE(model, nullptr);
1419     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1420 }
1421 
1422 // 正常场景:Context设置CPU,默认场景,不设置线程绑核
1423 HWTEST(MSLiteTest, OHOS_Context_CPU_0001, Function | MediumTest | Level1) {
1424     printf("==========Init Context==========\n");
1425     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1426     ASSERT_NE(context, nullptr);
1427     AddContextDeviceCPU(context);
1428     printf("==========Create model==========\n");
1429     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1430     ASSERT_NE(model, nullptr);
1431     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1432 }
1433 
1434 // 正常场景:Context设置CPU,4线程
1435 HWTEST(MSLiteTest, OHOS_Context_CPU_0002, Function | MediumTest | Level1) {
1436     printf("==========Init Context==========\n");
1437     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1438     ASSERT_NE(context, nullptr);
1439     OH_AI_ContextSetThreadNum(context, 4);
1440     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1441     printf("==========thread_num:%d\n", thread_num);
1442     ASSERT_EQ(thread_num, 4);
1443     AddContextDeviceCPU(context);
1444     printf("==========Create model==========\n");
1445     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1446     ASSERT_NE(model, nullptr);
1447     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1448 }
1449 
1450 // 正常场景:Context设置CPU,2线程
1451 HWTEST(MSLiteTest, OHOS_Context_CPU_0003, Function | MediumTest | Level1) {
1452     printf("==========Init Context==========\n");
1453     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1454     ASSERT_NE(context, nullptr);
1455     OH_AI_ContextSetThreadNum(context, 2);
1456     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1457     printf("==========thread_num:%d\n", thread_num);
1458     ASSERT_EQ(thread_num, 2);
1459     AddContextDeviceCPU(context);
1460     printf("==========Create model==========\n");
1461     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1462     ASSERT_NE(model, nullptr);
1463     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1464 }
1465 
1466 // 正常场景:Context设置CPU,1线程
1467 HWTEST(MSLiteTest, OHOS_Context_CPU_0004, Function | MediumTest | Level1) {
1468     printf("==========Init Context==========\n");
1469     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1470     ASSERT_NE(context, nullptr);
1471     OH_AI_ContextSetThreadNum(context, 1);
1472     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1473     printf("==========thread_num:%d\n", thread_num);
1474     ASSERT_EQ(thread_num, 1);
1475     AddContextDeviceCPU(context);
1476     printf("==========Create model==========\n");
1477     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1478     ASSERT_NE(model, nullptr);
1479     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1480 }
1481 
1482 // 异常场景:Context设置CPU,0线程
1483 HWTEST(MSLiteTest, OHOS_Context_CPU_0005, Function | MediumTest | Level1) {
1484     printf("==========Init Context==========\n");
1485     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1486     ASSERT_NE(context, nullptr);
1487     OH_AI_ContextSetThreadNum(context, 0);
1488     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1489     printf("==========thread_num:%d\n", thread_num);
1490     ASSERT_EQ(thread_num, 0);
1491     AddContextDeviceCPU(context);
1492     printf("==========Create model==========\n");
1493     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1494     ASSERT_NE(model, nullptr);
1495     printf("==========Build model==========\n");
1496     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
1497     printf("==========build model return code:%d\n", ret);
1498     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
1499     OH_AI_ModelDestroy(&model);
1500 }
1501 
1502 // 正常场景:Context设置CPU,不绑核
1503 HWTEST(MSLiteTest, OHOS_Context_CPU_0006, Function | MediumTest | Level1) {
1504     printf("==========Init Context==========\n");
1505     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1506     ASSERT_NE(context, nullptr);
1507     OH_AI_ContextSetThreadNum(context, 4);
1508     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1509     printf("==========thread_num:%d\n", thread_num);
1510     ASSERT_EQ(thread_num, 4);
1511     OH_AI_ContextSetThreadAffinityMode(context, 0);
1512     int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
1513     printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
1514     ASSERT_EQ(thread_affinity_mode, 0);
1515     AddContextDeviceCPU(context);
1516     printf("==========Create model==========\n");
1517     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1518     ASSERT_NE(model, nullptr);
1519     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1520 }
1521 
1522 // 正常场景:Context设置CPU,绑大核
1523 HWTEST(MSLiteTest, OHOS_Context_CPU_0007, Function | MediumTest | Level1) {
1524     printf("==========Init Context==========\n");
1525     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1526     ASSERT_NE(context, nullptr);
1527     OH_AI_ContextSetThreadNum(context, 4);
1528     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1529     printf("==========thread_num:%d\n", thread_num);
1530     ASSERT_EQ(thread_num, 4);
1531     OH_AI_ContextSetThreadAffinityMode(context, 1);
1532     int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
1533     printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
1534     ASSERT_EQ(thread_affinity_mode, 1);
1535     AddContextDeviceCPU(context);
1536     printf("==========Create model==========\n");
1537     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1538     ASSERT_NE(model, nullptr);
1539     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1540 }
1541 
1542 // 正常场景:Context设置CPU,绑中核
1543 HWTEST(MSLiteTest, OHOS_Context_CPU_0008, Function | MediumTest | Level1) {
1544     printf("==========Init Context==========\n");
1545     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1546     ASSERT_NE(context, nullptr);
1547     OH_AI_ContextSetThreadNum(context, 4);
1548     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1549     printf("==========thread_num:%d\n", thread_num);
1550     ASSERT_EQ(thread_num, 4);
1551     OH_AI_ContextSetThreadAffinityMode(context, 2);
1552     int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
1553     printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
1554     ASSERT_EQ(thread_affinity_mode, 2);
1555     AddContextDeviceCPU(context);
1556     printf("==========Create model==========\n");
1557     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1558     ASSERT_NE(model, nullptr);
1559     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1560 }
1561 
1562 // 异常场景:Context设置CPU,绑核失败
1563 HWTEST(MSLiteTest, OHOS_Context_CPU_0009, Function | MediumTest | Level1) {
1564     printf("==========Init Context==========\n");
1565     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1566     ASSERT_NE(context, nullptr);
1567     OH_AI_ContextSetThreadNum(context, 4);
1568     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1569     printf("==========thread_num:%d\n", thread_num);
1570     ASSERT_EQ(thread_num, 4);
1571     OH_AI_ContextSetThreadAffinityMode(context, 3);
1572     int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
1573     printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
1574     ASSERT_EQ(thread_affinity_mode, 0);
1575     AddContextDeviceCPU(context);
1576     printf("==========Create model==========\n");
1577     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1578     ASSERT_NE(model, nullptr);
1579     printf("==========Build model==========\n");
1580     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
1581     printf("==========build model return code:%d\n", ret);
1582     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
1583     OH_AI_ModelDestroy(&model);
1584 }
1585 
1586 // 正常场景:Context设置CPU,绑核列表{0,1,2,3}
1587 HWTEST(MSLiteTest, OHOS_Context_CPU_0010, Function | MediumTest | Level1) {
1588     printf("==========Init Context==========\n");
1589     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1590     ASSERT_NE(context, nullptr);
1591     OH_AI_ContextSetThreadNum(context, 4);
1592     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1593     printf("==========thread_num:%d\n", thread_num);
1594     ASSERT_EQ(thread_num, 4);
1595     constexpr size_t core_num = 4;
1596     int32_t core_list[core_num] = {0, 1, 2, 3};
1597     OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
1598     size_t ret_core_num;
1599     int32_t *ret_core_list = nullptr;
1600     ret_core_list = const_cast<int32_t *>(OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num));
1601     ASSERT_EQ(ret_core_num, core_num);
1602     for (size_t i = 0; i < ret_core_num; i++) {
1603         printf("==========ret_core_list:%d\n", ret_core_list[i]);
1604         ASSERT_EQ(ret_core_list[i], core_list[i]);
1605     }
1606     free(ret_core_list);
1607     AddContextDeviceCPU(context);
1608     printf("==========Create model==========\n");
1609     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1610     ASSERT_NE(model, nullptr);
1611     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1612 }
1613 
1614 // 正常场景:Context设置CPU,绑核列表和模式同时开启
1615 HWTEST(MSLiteTest, OHOS_Context_CPU_0011, Function | MediumTest | Level1) {
1616     printf("==========Init Context==========\n");
1617     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1618     ASSERT_NE(context, nullptr);
1619     OH_AI_ContextSetThreadNum(context, 4);
1620     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1621     printf("==========thread_num:%d\n", thread_num);
1622     ASSERT_EQ(thread_num, 4);
1623     OH_AI_ContextSetThreadAffinityMode(context, 1);
1624     int thread_affinity_mode = OH_AI_ContextGetThreadAffinityMode(context);
1625     printf("==========thread_affinity_mode:%d\n", thread_affinity_mode);
1626     constexpr size_t core_num = 4;
1627     int32_t core_list[core_num] = {0, 1, 3, 4};
1628     OH_AI_ContextSetThreadAffinityCoreList(context, core_list, core_num);
1629     size_t ret_core_num;
1630     int32_t *ret_core_list = nullptr;
1631     ret_core_list = const_cast<int32_t *>(OH_AI_ContextGetThreadAffinityCoreList(context, &ret_core_num));
1632     ASSERT_EQ(ret_core_num, core_num);
1633     for (size_t i = 0; i < ret_core_num; i++) {
1634         printf("==========ret_core_list:%d\n", ret_core_list[i]);
1635         ASSERT_EQ(ret_core_list[i], core_list[i]);
1636     }
1637     free(ret_core_list);
1638     AddContextDeviceCPU(context);
1639     printf("==========Create model==========\n");
1640     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1641     ASSERT_NE(model, nullptr);
1642     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1643 }
1644 
1645 // 正常场景:Context设置CPU,开启并行
1646 HWTEST(MSLiteTest, OHOS_Context_CPU_0012, Function | MediumTest | Level1) {
1647     printf("==========Init Context==========\n");
1648     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1649     ASSERT_NE(context, nullptr);
1650     OH_AI_ContextSetThreadNum(context, 4);
1651     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1652     printf("==========thread_num:%d\n", thread_num);
1653     ASSERT_EQ(thread_num, 4);
1654     OH_AI_ContextSetEnableParallel(context, true);
1655     bool is_parallel = OH_AI_ContextGetEnableParallel(context);
1656     printf("==========is_parallel:%d\n", is_parallel);
1657     ASSERT_EQ(is_parallel, true);
1658     AddContextDeviceCPU(context);
1659     printf("==========Create model==========\n");
1660     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1661     ASSERT_NE(model, nullptr);
1662     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1663 }
1664 
1665 // 正常场景:Context设置CPU,关闭并行
1666 HWTEST(MSLiteTest, OHOS_Context_CPU_0013, Function | MediumTest | Level1) {
1667     printf("==========Init Context==========\n");
1668     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1669     ASSERT_NE(context, nullptr);
1670     OH_AI_ContextSetThreadNum(context, 4);
1671     int32_t thread_num = OH_AI_ContextGetThreadNum(context);
1672     printf("==========thread_num:%d\n", thread_num);
1673     ASSERT_EQ(thread_num, 4);
1674     OH_AI_ContextSetEnableParallel(context, false);
1675     bool is_parallel = OH_AI_ContextGetEnableParallel(context);
1676     printf("==========is_parallel:%d\n", is_parallel);
1677     ASSERT_EQ(is_parallel, false);
1678     AddContextDeviceCPU(context);
1679     printf("==========Create model==========\n");
1680     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1681     ASSERT_NE(model, nullptr);
1682     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1683 }
1684 
1685 // 正常场景:Context设置CPU,开启fp16
1686 HWTEST(MSLiteTest, OHOS_Context_CPU_0014, Function | MediumTest | Level1) {
1687     printf("==========Init Context==========\n");
1688     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1689     ASSERT_NE(context, nullptr);
1690     OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
1691     ASSERT_NE(cpu_device_info, nullptr);
1692     OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, true);
1693     bool is_fp16 = OH_AI_DeviceInfoGetEnableFP16(cpu_device_info);
1694     printf("==========is_fp16:%d\n", is_fp16);
1695     ASSERT_EQ(is_fp16, true);
1696     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
1697     printf("==========Create model==========\n");
1698     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1699     ASSERT_NE(model, nullptr);
1700     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1701 }
1702 
1703 // 正常场景:Context设置CPU,关闭fp16
1704 HWTEST(MSLiteTest, OHOS_Context_CPU_0015, Function | MediumTest | Level1) {
1705     printf("==========Init Context==========\n");
1706     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1707     ASSERT_NE(context, nullptr);
1708     OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
1709     ASSERT_NE(cpu_device_info, nullptr);
1710     OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
1711     bool is_fp16 = OH_AI_DeviceInfoGetEnableFP16(cpu_device_info);
1712     printf("==========is_fp16:%d\n", is_fp16);
1713     ASSERT_EQ(is_fp16, false);
1714     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
1715     printf("==========Create model==========\n");
1716     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1717     ASSERT_NE(model, nullptr);
1718     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
1719 }
1720 
// Normal scenario: CPU device info with a custom provider (vendor) name.
HWTEST(MSLiteTest, OHOS_Context_CPU_0016, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_device_info, nullptr);
    // Set the provider name and read it back through the C API.
    OH_AI_DeviceInfoSetProvider(cpu_device_info, "vendor_new");
    char *proInfo = const_cast<char *>(OH_AI_DeviceInfoGetProvider(cpu_device_info));
    ASSERT_EQ(strcmp(proInfo, "vendor_new"), 0);
    // NOTE(review): this frees the pointer returned by the getter. That is only valid
    // if OH_AI_DeviceInfoGetProvider hands back a malloc'd copy whose ownership
    // transfers to the caller — confirm against the C API contract; if the string is
    // owned by the device info, this free() corrupts it before ModelPredict runs.
    free(proInfo);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
1738 
// Normal scenario: CPU device info with a custom provider-device string.
HWTEST(MSLiteTest, OHOS_Context_CPU_0017, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    ASSERT_NE(cpu_device_info, nullptr);
    // Set the provider-device string and read it back through the C API.
    OH_AI_DeviceInfoSetProviderDevice(cpu_device_info, "cpu_new");
    char *proInfo = const_cast<char *>(OH_AI_DeviceInfoGetProviderDevice(cpu_device_info));
    ASSERT_EQ(strcmp(proInfo, "cpu_new"), 0);
    // NOTE(review): freeing the getter's return value is only correct if the API
    // returns a caller-owned heap copy — confirm against the C API contract;
    // otherwise this free() invalidates state the later ModelPredict may rely on.
    free(proInfo);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
}
1756 
1757 // 正常场景:Context设置CPU,销毁MSDeviceInfo
1758 HWTEST(MSLiteTest, OHOS_Context_CPU_0018, Function | MediumTest | Level1) {
1759     printf("==========Init Context==========\n");
1760     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1761     ASSERT_NE(context, nullptr);
1762     OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
1763     ASSERT_NE(cpu_device_info, nullptr);
1764     OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info);
1765     printf("==========device_type:%d\n", device_type);
1766     ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU);
1767     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
1768     OH_AI_DeviceInfoDestroy(&cpu_device_info);
1769     ASSERT_EQ(cpu_device_info, nullptr);
1770 }
1771 
1772 // 正常场景:Context设置CPU,销毁OH_AI_Context
1773 HWTEST(MSLiteTest, OHOS_Context_CPU_0019, Function | MediumTest | Level1) {
1774     printf("==========Init Context==========\n");
1775     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1776     ASSERT_NE(context, nullptr);
1777     OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
1778     ASSERT_NE(cpu_device_info, nullptr);
1779     OH_AI_DeviceType device_type = OH_AI_DeviceInfoGetDeviceType(cpu_device_info);
1780     printf("==========device_type:%d\n", device_type);
1781     ASSERT_EQ(device_type, OH_AI_DEVICETYPE_CPU);
1782     OH_AI_ContextAddDeviceInfo(context, cpu_device_info);
1783     OH_AI_ContextDestroy(&context);
1784     ASSERT_EQ(context, nullptr);
1785 }
1786 
1787 // 异常场景:Context不设置device info
1788 HWTEST(MSLiteTest, OHOS_Context_CPU_0020, Function | MediumTest | Level1) {
1789     printf("==========Init Context==========\n");
1790     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1791     ASSERT_NE(context, nullptr);
1792     printf("==========Create model==========\n");
1793     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1794     ASSERT_NE(model, nullptr);
1795     printf("==========Build model==========\n");
1796     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
1797     printf("==========build model return code:%d\n", ret);
1798     ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR);
1799     OH_AI_ModelDestroy(&model);
1800 }
1801 
1802 // 正常场景:Context设置NPU,频率为1
1803 HWTEST(MSLiteTest, OHOS_Context_NPU_0002, Function | MediumTest | Level1) {
1804     printf("==========Init Context==========\n");
1805     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1806     ASSERT_NE(context, nullptr);
1807     OH_AI_DeviceInfoHandle npu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_KIRIN_NPU);
1808     ASSERT_NE(npu_device_info, nullptr);
1809     OH_AI_DeviceInfoSetFrequency(npu_device_info, 1);
1810     int frequency = OH_AI_DeviceInfoGetFrequency(npu_device_info);
1811     ASSERT_EQ(frequency, 1);
1812     OH_AI_ContextAddDeviceInfo(context, npu_device_info);
1813     printf("==========Create model==========\n");
1814     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1815     ASSERT_NE(model, nullptr);
1816     OH_AI_ContextDestroy(&context);
1817 }
1818 
1819 // 正常场景:ModelBuild,调用指针方法
1820 HWTEST(MSLiteTest, OHOS_Model_Build_0001, Function | MediumTest | Level1) {
1821     printf("==========Init Context==========\n");
1822     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1823     ASSERT_NE(context, nullptr);
1824     AddContextDeviceCPU(context);
1825     printf("==========Create model==========\n");
1826     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1827     ASSERT_NE(model, nullptr);
1828     ModelPredict(model, context, "ml_face_isface", {}, true, true, false);
1829 }
1830 
1831 // 异常场景:ModelBuild,model_data指向的不是模型数据
1832 HWTEST(MSLiteTest, OHOS_Model_Build_0002, Function | MediumTest | Level1) {
1833     printf("==========ReadFile==========\n");
1834     size_t size1;
1835     size_t *ptr_size1 = &size1;
1836     const char *imagePath = "/data/test/ml_face_isface.input";
1837     char *imageBuf = ReadFile(imagePath, ptr_size1);
1838     ASSERT_NE(imageBuf, nullptr);
1839     printf("==========Init Context==========\n");
1840     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1841     ASSERT_NE(context, nullptr);
1842     AddContextDeviceCPU(context);
1843     printf("==========Create model==========\n");
1844     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1845     ASSERT_NE(model, nullptr);
1846     printf("==========Build model==========\n");
1847     OH_AI_Status ret = OH_AI_ModelBuild(model, imageBuf, size1, OH_AI_MODELTYPE_MINDIR, context);
1848     printf("==========build model return code:%d\n", ret);
1849     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
1850     delete[] imageBuf;
1851     OH_AI_ModelDestroy(&model);
1852 }
1853 
1854 // 异常场景:ModelBuild,model_data为空
1855 HWTEST(MSLiteTest, OHOS_Model_Build_0003, Function | MediumTest | Level1) {
1856     printf("==========Init Context==========\n");
1857     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1858     ASSERT_NE(context, nullptr);
1859     AddContextDeviceCPU(context);
1860     printf("==========Create model==========\n");
1861     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1862     ASSERT_NE(model, nullptr);
1863     printf("==========Build model==========\n");
1864     OH_AI_Status ret = OH_AI_ModelBuild(model, nullptr, 0, OH_AI_MODELTYPE_MINDIR, context);
1865     printf("==========build model return code:%d\n", ret);
1866     ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR);
1867     OH_AI_ModelDestroy(&model);
1868 }
1869 
1870 // 异常场景:ModelBuild,data_size为0
1871 HWTEST(MSLiteTest, OHOS_Model_Build_0004, Function | MediumTest | Level1) {
1872     printf("==========ReadFile==========\n");
1873     size_t size;
1874     size_t *ptr_size = &size;
1875     const char *graphPath = "/data/test/ml_face_isface.ms";
1876     char *graphBuf = ReadFile(graphPath, ptr_size);
1877     ASSERT_NE(graphBuf, nullptr);
1878     printf("==========Init Context==========\n");
1879     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1880     ASSERT_NE(context, nullptr);
1881     AddContextDeviceCPU(context);
1882     printf("==========Create model==========\n");
1883     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1884     ASSERT_NE(model, nullptr);
1885     printf("==========Build model==========\n");
1886     OH_AI_Status ret = OH_AI_ModelBuild(model, graphBuf, 0, OH_AI_MODELTYPE_MINDIR, context);
1887     printf("==========build model return code:%d\n", ret);
1888     ASSERT_EQ(ret, OH_AI_STATUS_LITE_INPUT_PARAM_INVALID);
1889     delete[] graphBuf;
1890     OH_AI_ModelDestroy(&model);
1891 }
1892 
1893 // 异常场景:ModelBuild,读取路径方法,且路径不是模型数据路径
1894 HWTEST(MSLiteTest, OHOS_Model_Build_0005, Function | MediumTest | Level1) {
1895     printf("==========Init Context==========\n");
1896     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1897     ASSERT_NE(context, nullptr);
1898     AddContextDeviceCPU(context);
1899     printf("==========Create model==========\n");
1900     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1901     ASSERT_NE(model, nullptr);
1902     printf("==========Build model==========\n");
1903     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.input", OH_AI_MODELTYPE_MINDIR, context);
1904     printf("==========build model return code:%d\n", ret);
1905     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
1906     OH_AI_ModelDestroy(&model);
1907 }
1908 
1909 // 异常场景:ModelBuild,读取路径方法,路径为空
1910 HWTEST(MSLiteTest, OHOS_Model_Build_0006, Function | MediumTest | Level1) {
1911     printf("==========Init Context==========\n");
1912     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1913     ASSERT_NE(context, nullptr);
1914     AddContextDeviceCPU(context);
1915     printf("==========Create model==========\n");
1916     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1917     ASSERT_NE(model, nullptr);
1918     printf("==========Build model==========\n");
1919     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "", OH_AI_MODELTYPE_MINDIR, context);
1920     printf("==========build model return code:%d\n", ret);
1921     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
1922     OH_AI_ModelDestroy(&model);
1923 }
1924 
1925 // 异常场景:ModelBuild,model_type不支持
1926 HWTEST(MSLiteTest, OHOS_Model_Build_0007, Function | MediumTest | Level1) {
1927     printf("==========Init Context==========\n");
1928     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1929     ASSERT_NE(context, nullptr);
1930     AddContextDeviceCPU(context);
1931     printf("==========Create model==========\n");
1932     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1933     ASSERT_NE(model, nullptr);
1934     printf("==========Build model==========\n");
1935     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_INVALID, context);
1936     printf("==========build model return code:%d\n", ret);
1937     ASSERT_EQ(ret, OH_AI_STATUS_LITE_PARAM_INVALID);
1938     OH_AI_ModelDestroy(&model);
1939 }
1940 
1941 // 异常场景:ModelBuild,model_context为空
1942 HWTEST(MSLiteTest, OHOS_Model_Build_0008, Function | MediumTest | Level1) {
1943     printf("==========Init Context==========\n");
1944     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1945     ASSERT_NE(context, nullptr);
1946     AddContextDeviceCPU(context);
1947     printf("==========Create model==========\n");
1948     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1949     ASSERT_NE(model, nullptr);
1950     printf("==========Build model==========\n");
1951     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, nullptr);
1952     printf("==========build model return code:%d\n", ret);
1953     ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR);
1954     OH_AI_ModelDestroy(&model);
1955 }
1956 
1957 // 正常场景:ModelBuild,调用GetOutputs获取输出
1958 HWTEST(MSLiteTest, OHOS_Model_Build_0009, Function | MediumTest | Level1) {
1959     printf("==========Init Context==========\n");
1960     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1961     ASSERT_NE(context, nullptr);
1962     AddContextDeviceCPU(context);
1963     printf("==========Create model==========\n");
1964     OH_AI_ModelHandle model = OH_AI_ModelCreate();
1965     ASSERT_NE(model, nullptr);
1966     printf("==========Model build==========\n");
1967     OH_AI_ModelBuildFromFile(model,"/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
1968     printf("==========Model Predict==========\n");
1969     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
1970     OH_AI_TensorHandleArray output;
1971     FillInputsData(inputs,"ml_face_isface",false);
1972     OH_AI_Status ret = OH_AI_ModelPredict(model, inputs, &output, nullptr, nullptr);
1973     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
1974     printf("==========GetOutput==========\n");
1975     OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
1976     for (size_t i = 0; i < outputs.handle_num; ++i) {
1977         OH_AI_TensorHandle tensor = outputs.handle_list[i];
1978         int64_t element_num = OH_AI_TensorGetElementNum(tensor);
1979         printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
1980         float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
1981         printf("output data is:");
1982         for (int j = 0; j < element_num && j <= 20; ++j) {
1983             printf("%f ", output_data[j]);
1984         }
1985         printf("\n");
1986         printf("==========compFp32WithTData==========\n");
1987         string expectedDataFile = "/data/test/ml_face_isface" + std::to_string(i) + ".output";
1988         bool result = compFp32WithTData(output_data, expectedDataFile, 0.01, 0.01, false);
1989         EXPECT_EQ(result, true);
1990     }
1991 }
1992 
1993 // 正常场景:ModelResize,shape与之前一致
1994 HWTEST(MSLiteTest, OHOS_Model_Resize_0001, Function | MediumTest | Level1) {
1995     printf("==========Init Context==========\n");
1996     OH_AI_ContextHandle context = OH_AI_ContextCreate();
1997     ASSERT_NE(context, nullptr);
1998     AddContextDeviceCPU(context);
1999     printf("==========Create model==========\n");
2000     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2001     ASSERT_NE(model, nullptr);
2002     ModelPredict(model, context, "ml_ocr_cn", {4, {1, 32, 512, 1}}, false, true, false);
2003 }
2004 
2005 // 正常场景:ModelResize,shape与之前不一致
2006 HWTEST(MSLiteTest, OHOS_Model_Resize_0002, Function | MediumTest | Level1) {
2007     printf("==========Init Context==========\n");
2008     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2009     ASSERT_NE(context, nullptr);
2010     AddContextDeviceCPU(context);
2011     printf("==========Create model==========\n");
2012     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2013     ASSERT_NE(model, nullptr);
2014     printf("==========Build model==========\n");
2015     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
2016     printf("==========build model return code:%d\n", ret);
2017     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2018     printf("==========GetInputs==========\n");
2019     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2020     ASSERT_NE(inputs.handle_list, nullptr);
2021     printf("==========Resizes==========\n");
2022     OH_AI_ShapeInfo shape_infos = {4, {1, 64, 256, 1}};
2023     OH_AI_Status resize_ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
2024     printf("==========Resizes return code:%d\n", resize_ret);
2025     ASSERT_EQ(resize_ret, OH_AI_STATUS_SUCCESS);
2026     FillInputsData(inputs, "ml_ocr_cn", false);
2027     OH_AI_TensorHandleArray outputs;
2028     printf("==========Model Predict==========\n");
2029     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2030     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
2031 }
2032 
2033 // 异常场景:ModelResize,shape为三维
2034 HWTEST(MSLiteTest, OHOS_Model_Resize_0003, Function | MediumTest | Level1) {
2035     printf("==========Init Context==========\n");
2036     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2037     ASSERT_NE(context, nullptr);
2038     AddContextDeviceCPU(context);
2039     printf("==========Create model==========\n");
2040     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2041     ASSERT_NE(model, nullptr);
2042     printf("==========Build model==========\n");
2043     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
2044     printf("==========build model return code:%d\n", ret);
2045     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2046     printf("==========GetInputs==========\n");
2047     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2048     ASSERT_NE(inputs.handle_list, nullptr);
2049     printf("==========Resizes==========\n");
2050     OH_AI_ShapeInfo shape_infos = {4, {1, 32, 1}};
2051     ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
2052     printf("==========Resizes return code:%d\n", ret);
2053     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
2054     OH_AI_ModelDestroy(&model);
2055 }
2056 
2057 // 异常场景:ModelResize,shape值有负数
2058 HWTEST(MSLiteTest, OHOS_Model_Resize_0004, Function | MediumTest | Level1) {
2059     printf("==========Init Context==========\n");
2060     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2061     ASSERT_NE(context, nullptr);
2062     AddContextDeviceCPU(context);
2063     printf("==========Create model==========\n");
2064     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2065     ASSERT_NE(model, nullptr);
2066     printf("==========Build model==========\n");
2067     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
2068     printf("==========build model return code:%d\n", ret);
2069     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2070     printf("==========GetInputs==========\n");
2071     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2072     ASSERT_NE(inputs.handle_list, nullptr);
2073     printf("==========Resizes==========\n");
2074     OH_AI_ShapeInfo shape_infos = {4, {1, -32, 32, 1}};
2075     ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
2076     printf("==========Resizes return code:%d\n", ret);
2077     ASSERT_EQ(ret, OH_AI_STATUS_LITE_PARAM_INVALID);
2078     OH_AI_ModelDestroy(&model);
2079 }
2080 
2081 // 异常场景:ModelResize,不支持resize的模型
2082 HWTEST(MSLiteTest, OHOS_Model_Resize_0005, Function | MediumTest | Level1) {
2083     printf("==========Init Context==========\n");
2084     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2085     ASSERT_NE(context, nullptr);
2086     AddContextDeviceCPU(context);
2087     printf("==========Create model==========\n");
2088     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2089     ASSERT_NE(model, nullptr);
2090     printf("==========Build model==========\n");
2091     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2092     printf("==========build model return code:%d\n", ret);
2093     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2094     printf("==========GetInputs==========\n");
2095     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2096     ASSERT_NE(inputs.handle_list, nullptr);
2097     printf("==========Resizes==========\n");
2098     OH_AI_ShapeInfo shape_infos = {4, {1, 96, 96, 1}};
2099     ret = OH_AI_ModelResize(model, inputs, &shape_infos, inputs.handle_num);
2100     printf("==========Resizes return code:%d\n", ret);
2101     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
2102     OH_AI_ModelDestroy(&model);
2103 }
2104 
2105 // 正常场景:ModelPredict
2106 HWTEST(MSLiteTest, OHOS_Model_Predict_0001, Function | MediumTest | Level1) {
2107     printf("==========Init Context==========\n");
2108     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2109     ASSERT_NE(context, nullptr);
2110     AddContextDeviceCPU(context);
2111     printf("==========Create model==========\n");
2112     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2113     ASSERT_NE(model, nullptr);
2114     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
2115 }
2116 
2117 // 异常场景:ModelPredict,model被销毁
2118 HWTEST(MSLiteTest, OHOS_Model_Predict_0002, Function | MediumTest | Level1) {
2119     printf("==========Init Context==========\n");
2120     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2121     ASSERT_NE(context, nullptr);
2122     AddContextDeviceCPU(context);
2123     printf("==========Create model==========\n");
2124     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2125     ASSERT_NE(model, nullptr);
2126     printf("==========Build model==========\n");
2127     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2128     printf("==========build model return code:%d\n", ret);
2129     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2130     printf("==========GetInputs==========\n");
2131     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2132     ASSERT_NE(inputs.handle_list, nullptr);
2133     FillInputsData(inputs, "ml_face_isface", true);
2134     printf("==========Model Predict==========\n");
2135     OH_AI_TensorHandleArray outputs;
2136     OH_AI_ModelDestroy(&model);
2137     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2138     printf("==========Model Predict return code:%d\n", ret);
2139     ASSERT_EQ(ret, OH_AI_STATUS_LITE_NULLPTR);
2140 }
2141 
2142 // 异常场景:ModelPredict,input为空
2143 HWTEST(MSLiteTest, OHOS_Model_Predict_0003, Function | MediumTest | Level1) {
2144     printf("==========Init Context==========\n");
2145     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2146     ASSERT_NE(context, nullptr);
2147     AddContextDeviceCPU(context);
2148     printf("==========Create model==========\n");
2149     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2150     ASSERT_NE(model, nullptr);
2151     printf("==========Build model==========\n");
2152     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2153     printf("==========build model return code:%d\n", ret);
2154     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2155     printf("==========Model Predict==========\n");
2156     OH_AI_TensorHandleArray inputs;
2157     OH_AI_TensorHandleArray outputs;
2158     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2159     printf("==========Model Predict return code:%d\n", ret);
2160     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
2161     OH_AI_ModelDestroy(&model);
2162 }
2163 
2164 // 正常场景:ModelPredict,传入回调函数
2165 HWTEST(MSLiteTest, OHOS_Model_Predict_0004, Function | MediumTest | Level1) {
2166     printf("==========Init Context==========\n");
2167     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2168     ASSERT_NE(context, nullptr);
2169     AddContextDeviceCPU(context);
2170     printf("==========Create model==========\n");
2171     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2172     ASSERT_NE(model, nullptr);
2173     ModelPredict(model, context, "ml_face_isface", {}, false, true, true);
2174 }
2175 
2176 // 正常场景:ModelGetInputByTensorName
2177 HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0001, Function | MediumTest | Level1) {
2178     printf("==========ReadFile==========\n");
2179     size_t size1;
2180     size_t *ptr_size1 = &size1;
2181     const char *imagePath = "/data/test/ml_face_isface.input";
2182     char *imageBuf = ReadFile(imagePath, ptr_size1);
2183     ASSERT_NE(imageBuf, nullptr);
2184     printf("==========Init Context==========\n");
2185     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2186     ASSERT_NE(context, nullptr);
2187     AddContextDeviceCPU(context);
2188     printf("==========Create model==========\n");
2189     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2190     ASSERT_NE(model, nullptr);
2191     printf("==========Build model==========\n");
2192     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2193     printf("==========build model return code:%d\n", ret);
2194     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2195     printf("==========GetInputs==========\n");
2196     OH_AI_TensorHandle tensor = OH_AI_ModelGetInputByTensorName(model, "data");
2197     ASSERT_NE(tensor, nullptr);
2198     int64_t element_num = OH_AI_TensorGetElementNum(tensor);
2199     printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
2200     float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
2201     ASSERT_NE(input_data, nullptr);
2202     printf("==========Transpose==========\n");
2203     size_t shape_num;
2204     const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
2205     auto imageBuf_nhwc = new char[size1];
2206     PackNCHWToNHWCFp32(imageBuf, imageBuf_nhwc, shape[0], shape[1] * shape[2], shape[3]);
2207     memcpy_s(input_data, size1, imageBuf_nhwc, size1);
2208     printf("input data is:");
2209     for (int j = 0; j < element_num && j <= 20; ++j) {
2210         printf("%f ", input_data[j]);
2211     }
2212     printf("\n");
2213     printf("==========Model Predict==========\n");
2214     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2215     ASSERT_NE(inputs.handle_list, nullptr);
2216     OH_AI_TensorHandleArray outputs;
2217     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2218     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2219     CompareResult(outputs, "ml_face_isface");
2220     delete[] imageBuf;
2221     OH_AI_ModelDestroy(&model);
2222 }
2223 
2224 // 异常场景:ModelGetInputByTensorName,名称不存在
2225 HWTEST(MSLiteTest, OHOS_Model_GetInputByTensorName_0002, Function | MediumTest | Level1) {
2226     printf("==========Init Context==========\n");
2227     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2228     ASSERT_NE(context, nullptr);
2229     AddContextDeviceCPU(context);
2230     printf("==========Create model==========\n");
2231     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2232     ASSERT_NE(model, nullptr);
2233     printf("==========Build model==========\n");
2234     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2235     printf("==========build model return code:%d\n", ret);
2236     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2237     printf("==========GetInputs==========\n");
2238     OH_AI_TensorHandle tensor = OH_AI_ModelGetInputByTensorName(model, "aaa");
2239     ASSERT_EQ(tensor, nullptr);
2240     OH_AI_ModelDestroy(&model);
2241 }
2242 
2243 // 正常场景:ModelGetOutputByTensorName
2244 HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0001, Function | MediumTest | Level1) {
2245     printf("==========Init Context==========\n");
2246     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2247     ASSERT_NE(context, nullptr);
2248     AddContextDeviceCPU(context);
2249     printf("==========Create model==========\n");
2250     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2251     ASSERT_NE(model, nullptr);
2252     printf("==========Build model==========\n");
2253     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2254     printf("==========build model return code:%d\n", ret);
2255     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2256     printf("==========GetInputs==========\n");
2257     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2258     ASSERT_NE(inputs.handle_list, nullptr);
2259     FillInputsData(inputs, "ml_face_isface", true);
2260     printf("==========Model Predict==========\n");
2261     OH_AI_TensorHandleArray outputs;
2262     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2263     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2264     printf("==========GetOutput==========\n");
2265     OH_AI_TensorHandle tensor = OH_AI_ModelGetOutputByTensorName(model, "prob");
2266     ASSERT_NE(tensor, nullptr);
2267     int64_t element_num = OH_AI_TensorGetElementNum(tensor);
2268     printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
2269     float *output_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
2270     printf("output data is:");
2271     for (int j = 0; j < element_num && j <= 20; ++j) {
2272         printf("%f ", output_data[j]);
2273     }
2274     printf("\n");
2275     printf("==========compFp32WithTData==========\n");
2276     bool result = compFp32WithTData(output_data, "/data/test/ml_face_isface0.output", 0.01, 0.01, false);
2277     EXPECT_EQ(result, true);
2278     OH_AI_ModelDestroy(&model);
2279 }
2280 
2281 // 异常场景:ModelGetOutputByTensorName,名称不存在
2282 HWTEST(MSLiteTest, OHOS_Model_GetOutputByTensorName_0002, Function | MediumTest | Level1) {
2283     printf("==========Init Context==========\n");
2284     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2285     ASSERT_NE(context, nullptr);
2286     AddContextDeviceCPU(context);
2287     printf("==========Create model==========\n");
2288     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2289     ASSERT_NE(model, nullptr);
2290     printf("==========Build model==========\n");
2291     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2292     printf("==========build model return code:%d\n", ret);
2293     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2294     printf("==========GetInputs==========\n");
2295     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2296     ASSERT_NE(inputs.handle_list, nullptr);
2297     FillInputsData(inputs, "ml_face_isface", true);
2298     printf("==========Model Predict==========\n");
2299     OH_AI_TensorHandleArray outputs;
2300     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2301     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2302     printf("==========GetOutput==========\n");
2303     OH_AI_TensorHandle tensor = OH_AI_ModelGetOutputByTensorName(model, "aaa");
2304     ASSERT_EQ(tensor, nullptr);
2305     OH_AI_ModelDestroy(&model);
2306 }
2307 
// Normal scenario: create a standalone tensor and use it as the model's input.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    // Create a float32 tensor with the model's input shape; data pointer is null,
    // so the tensor allocates/receives its buffer later via FillInputsData.
    constexpr size_t create_shape_num = 4;
    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                            create_shape_num, nullptr, 0);
    ASSERT_NE(tensor, nullptr);
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    // NOTE(review): overwriting handle_list[0] swaps the model's own input handle
    // for the user-created tensor; the created tensor is never destroyed here and
    // ownership after ModelDestroy is unclear — confirm against the C API contract
    // before adding an OH_AI_TensorDestroy (risk of double-free).
    inputs.handle_list[0] = tensor;
    FillInputsData(inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, "ml_face_isface");
    OH_AI_ModelDestroy(&model);
}
2337 
2338 // 正常场景:MSTensorDestroy,销毁tensor
2339 HWTEST(MSLiteTest, OHOS_Tensor_Create_0002, Function | MediumTest | Level1) {
2340     printf("==========ReadFile==========\n");
2341     size_t size1;
2342     size_t *ptr_size1 = &size1;
2343     const char *imagePath = "/data/test/ml_face_isface.input";
2344     char *imageBuf = ReadFile(imagePath, ptr_size1);
2345     ASSERT_NE(imageBuf, nullptr);
2346     printf("==========OH_AI_TensorCreate==========\n");
2347     constexpr size_t create_shape_num = 4;
2348     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2349     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2350                                            create_shape_num, imageBuf, size1);
2351     ASSERT_NE(tensor, nullptr);
2352     delete[] imageBuf;
2353     OH_AI_TensorDestroy(&tensor);
2354     ASSERT_EQ(tensor, nullptr);
2355 }
2356 
2357 // 正常场景:MSTensorGetName,获取tensor名称
2358 HWTEST(MSLiteTest, OHOS_Tensor_Create_0003, Function | MediumTest | Level1) {
2359     printf("==========ReadFile==========\n");
2360     size_t size1;
2361     size_t *ptr_size1 = &size1;
2362     const char *imagePath = "/data/test/ml_face_isface.input";
2363     char *imageBuf = ReadFile(imagePath, ptr_size1);
2364     ASSERT_NE(imageBuf, nullptr);
2365     printf("==========OH_AI_TensorCreate==========\n");
2366     constexpr size_t create_shape_num = 4;
2367     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2368     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2369                                             create_shape_num, imageBuf, size1);
2370     ASSERT_NE(tensor, nullptr);
2371     const char *tensor_name = OH_AI_TensorGetName(tensor);
2372     ASSERT_EQ(strcmp(tensor_name, "data"), 0);
2373     delete[] imageBuf;
2374     OH_AI_TensorDestroy(&tensor);
2375 }
2376 
2377 // 正常场景:MSTensorGetName,设置tensor名称
2378 HWTEST(MSLiteTest, OHOS_Tensor_Create_0004, Function | MediumTest | Level1) {
2379     printf("==========ReadFile==========\n");
2380     size_t size1;
2381     size_t *ptr_size1 = &size1;
2382     const char *imagePath = "/data/test/ml_face_isface.input";
2383     char *imageBuf = ReadFile(imagePath, ptr_size1);
2384     ASSERT_NE(imageBuf, nullptr);
2385     printf("==========OH_AI_TensorCreate==========\n");
2386     constexpr size_t create_shape_num = 4;
2387     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2388     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2389                                            create_shape_num, imageBuf, size1);
2390     ASSERT_NE(tensor, nullptr);
2391     OH_AI_TensorSetName(tensor, "new_data");
2392     const char *tensor_name = OH_AI_TensorGetName(tensor);
2393     ASSERT_EQ(strcmp(tensor_name, "new_data"), 0);
2394     delete[] imageBuf;
2395     OH_AI_TensorDestroy(&tensor);
2396 }
2397 
2398 // 正常场景:MSTensorGetDataType,获取tensor数据类型
2399 HWTEST(MSLiteTest, OHOS_Tensor_Create_0005, Function | MediumTest | Level1) {
2400     printf("==========ReadFile==========\n");
2401     size_t size1;
2402     size_t *ptr_size1 = &size1;
2403     const char *imagePath = "/data/test/ml_face_isface.input";
2404     char *imageBuf = ReadFile(imagePath, ptr_size1);
2405     ASSERT_NE(imageBuf, nullptr);
2406     printf("==========OH_AI_TensorCreate==========\n");
2407     constexpr size_t create_shape_num = 4;
2408     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2409     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2410                                            create_shape_num, imageBuf, size1);
2411     ASSERT_NE(tensor, nullptr);
2412     OH_AI_DataType data_type = OH_AI_TensorGetDataType(tensor);
2413     ASSERT_EQ(data_type, OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
2414     delete[] imageBuf;
2415     OH_AI_TensorDestroy(&tensor);
2416 }
2417 
2418 // 正常场景:MSTensorSetDataType,设置tensor数据类型
2419 HWTEST(MSLiteTest, OHOS_Tensor_Create_0006, Function | MediumTest | Level1) {
2420     printf("==========ReadFile==========\n");
2421     size_t size1;
2422     size_t *ptr_size1 = &size1;
2423     const char *imagePath = "/data/test/ml_face_isface.input";
2424     char *imageBuf = ReadFile(imagePath, ptr_size1);
2425     ASSERT_NE(imageBuf, nullptr);
2426     printf("==========OH_AI_TensorCreate==========\n");
2427     constexpr size_t create_shape_num = 4;
2428     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2429     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2430                                            create_shape_num, imageBuf, size1);
2431     ASSERT_NE(tensor, nullptr);
2432     OH_AI_TensorSetDataType(tensor, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16);
2433     OH_AI_DataType data_type = OH_AI_TensorGetDataType(tensor);
2434     ASSERT_EQ(data_type, OH_AI_DATATYPE_NUMBERTYPE_FLOAT16);
2435     delete[] imageBuf;
2436     OH_AI_TensorDestroy(&tensor);
2437 }
2438 
2439 // 正常场景:MSTensorGetShape,获取tensor维度
2440 HWTEST(MSLiteTest, OHOS_Tensor_Create_0007, Function | MediumTest | Level1) {
2441     printf("==========ReadFile==========\n");
2442     size_t size1;
2443     size_t *ptr_size1 = &size1;
2444     const char *imagePath = "/data/test/ml_face_isface.input";
2445     char *imageBuf = ReadFile(imagePath, ptr_size1);
2446     ASSERT_NE(imageBuf, nullptr);
2447     printf("==========OH_AI_TensorCreate==========\n");
2448     constexpr size_t create_shape_num = 4;
2449     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2450     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2451                                             create_shape_num, imageBuf, size1);
2452     ASSERT_NE(tensor, nullptr);
2453     size_t ret_shape_num;
2454     const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num);
2455     ASSERT_EQ(ret_shape_num, create_shape_num);
2456     for (size_t i = 0; i < ret_shape_num; i++) {
2457         ASSERT_EQ(ret_shape[i], create_shape[i]);
2458     }
2459     delete[] imageBuf;
2460     OH_AI_TensorDestroy(&tensor);
2461 }
2462 
2463 // 正常场景:MSTensorSetShape,设置tensor维度
2464 HWTEST(MSLiteTest, OHOS_Tensor_Create_0008, Function | MediumTest | Level1) {
2465     printf("==========ReadFile==========\n");
2466     size_t size1;
2467     size_t *ptr_size1 = &size1;
2468     const char *imagePath = "/data/test/ml_face_isface.input";
2469     char *imageBuf = ReadFile(imagePath, ptr_size1);
2470     ASSERT_NE(imageBuf, nullptr);
2471     printf("==========OH_AI_TensorCreate==========\n");
2472     constexpr size_t create_shape_num = 4;
2473     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2474     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2475                                            create_shape_num, imageBuf, size1);
2476     ASSERT_NE(tensor, nullptr);
2477     size_t ret_shape_num;
2478     const int64_t *ret_shape = OH_AI_TensorGetShape(tensor, &ret_shape_num);
2479     ASSERT_EQ(ret_shape_num, create_shape_num);
2480     for (size_t i = 0; i < ret_shape_num; i++) {
2481         ASSERT_EQ(ret_shape[i], create_shape[i]);
2482     }
2483     constexpr size_t new_shape_num = 4;
2484     int64_t new_shape[new_shape_num] = {1, 32, 32, 1};
2485     OH_AI_TensorSetShape(tensor, new_shape, new_shape_num);
2486     size_t new_ret_shape_num;
2487     const int64_t *new_ret_shape = OH_AI_TensorGetShape(tensor, &new_ret_shape_num);
2488     ASSERT_EQ(new_ret_shape_num, new_shape_num);
2489     for (size_t i = 0; i < new_ret_shape_num; i++) {
2490         ASSERT_EQ(new_ret_shape[i], new_shape[i]);
2491     }
2492     delete[] imageBuf;
2493     OH_AI_TensorDestroy(&tensor);
2494 }
2495 
2496 // 正常场景:MSTensorGetFormat,获取tensor格式
2497 HWTEST(MSLiteTest, OHOS_Tensor_Create_0009, Function | MediumTest | Level1) {
2498     printf("==========ReadFile==========\n");
2499     size_t size1;
2500     size_t *ptr_size1 = &size1;
2501     const char *imagePath = "/data/test/ml_face_isface.input";
2502     char *imageBuf = ReadFile(imagePath, ptr_size1);
2503     ASSERT_NE(imageBuf, nullptr);
2504     printf("==========OH_AI_TensorCreate==========\n");
2505     constexpr size_t create_shape_num = 4;
2506     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2507     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2508                                            create_shape_num, imageBuf, size1);
2509     ASSERT_NE(tensor, nullptr);
2510     OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor);
2511     ASSERT_EQ(data_format, OH_AI_FORMAT_NHWC);
2512     delete[] imageBuf;
2513     OH_AI_TensorDestroy(&tensor);
2514 }
2515 
2516 // 正常场景:MSTensorSetFormat,设置tensor格式
2517 HWTEST(MSLiteTest, OHOS_Tensor_Create_0010, Function | MediumTest | Level1) {
2518     printf("==========ReadFile==========\n");
2519     size_t size1;
2520     size_t *ptr_size1 = &size1;
2521     const char *imagePath = "/data/test/ml_face_isface.input";
2522     char *imageBuf = ReadFile(imagePath, ptr_size1);
2523     ASSERT_NE(imageBuf, nullptr);
2524     printf("==========OH_AI_TensorCreate==========\n");
2525     constexpr size_t create_shape_num = 4;
2526     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2527     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2528                                            create_shape_num, imageBuf, size1);
2529     ASSERT_NE(tensor, nullptr);
2530     OH_AI_TensorSetFormat(tensor, OH_AI_FORMAT_NHWC);
2531     OH_AI_Format data_format = OH_AI_TensorGetFormat(tensor);
2532     ASSERT_EQ(data_format, OH_AI_FORMAT_NHWC);
2533     delete[] imageBuf;
2534     OH_AI_TensorDestroy(&tensor);
2535 }
2536 
2537 // 正常场景:MSTensorGetData,获取tensor数据
2538 HWTEST(MSLiteTest, OHOS_Tensor_Create_0011, Function | MediumTest | Level1) {
2539     printf("==========ReadFile==========\n");
2540     size_t size1;
2541     size_t *ptr_size1 = &size1;
2542     const char *imagePath = "/data/test/ml_face_isface.input";
2543     char *imageBuf = ReadFile(imagePath, ptr_size1);
2544     ASSERT_NE(imageBuf, nullptr);
2545     printf("==========OH_AI_TensorCreate==========\n");
2546     constexpr size_t create_shape_num = 4;
2547     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2548     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2549                                            create_shape_num, imageBuf, size1);
2550     ASSERT_NE(tensor, nullptr);
2551     const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
2552     ASSERT_NE(ret_data, nullptr);
2553     printf("return data is:");
2554     for (int i = 0; i < 20; ++i) {
2555         printf("%f ", ret_data[i]);
2556     }
2557     printf("\n");
2558     delete[] imageBuf;
2559     OH_AI_TensorDestroy(&tensor);
2560 }
2561 
// Normal scenario: MSTensorSetData — point the tensor at a user-supplied buffer
// and verify that OH_AI_TensorGetData returns the same values.
HWTEST(MSLiteTest, OHOS_Tensor_Create_0012, Function | MediumTest | Level1) {
    printf("==========ReadFile==========\n");
    size_t size1;
    size_t *ptr_size1 = &size1;
    const char *imagePath = "/data/test/ml_face_isface.input";
    char *imageBuf = ReadFile(imagePath, ptr_size1);
    ASSERT_NE(imageBuf, nullptr);
    printf("==========OH_AI_TensorCreate==========\n");
    constexpr size_t create_shape_num = 4;
    int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
    OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
                                           create_shape_num, imageBuf, size1);
    ASSERT_NE(tensor, nullptr);
    // NOTE(review): `data` is a stack array handed to the tensor by pointer.
    // This assumes OH_AI_TensorSetData only swaps the pointer and the tensor
    // does not take ownership/free it on destroy — verify against the C API docs.
    constexpr size_t data_len = 6;
    float data[data_len] = {1, 2, 3, 4, 5, 6};
    OH_AI_TensorSetData(tensor, data);
    const float *ret_data = static_cast<const float *>(OH_AI_TensorGetData(tensor));
    ASSERT_NE(ret_data, nullptr);
    // The getter must now expose exactly the values that were set.
    printf("return data is:");
    for (size_t i = 0; i < data_len; i++) {
        ASSERT_EQ(ret_data[i], data[i]);
        printf("%f ", ret_data[i]);
    }
    printf("\n");
    delete[] imageBuf;
    OH_AI_TensorDestroy(&tensor);
}
2590 
2591 // 正常场景:MSTensorGetElementNum,获取tensor元素
2592 HWTEST(MSLiteTest, OHOS_Tensor_Create_0013, Function | MediumTest | Level1) {
2593     printf("==========ReadFile==========\n");
2594     size_t size1;
2595     size_t *ptr_size1 = &size1;
2596     const char *imagePath = "/data/test/ml_face_isface.input";
2597     char *imageBuf = ReadFile(imagePath, ptr_size1);
2598     ASSERT_NE(imageBuf, nullptr);
2599     printf("==========OH_AI_TensorCreate==========\n");
2600     constexpr size_t create_shape_num = 4;
2601     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2602     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2603                                            create_shape_num, imageBuf, size1);
2604     ASSERT_NE(tensor, nullptr);
2605     int64_t element_num = OH_AI_TensorGetElementNum(tensor);
2606     printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
2607     ASSERT_EQ(element_num, 6912);
2608     delete[] imageBuf;
2609     OH_AI_TensorDestroy(&tensor);
2610 }
2611 
2612 // 正常场景:MSTensorGetDataSize,获取tensor大小
2613 HWTEST(MSLiteTest, OHOS_Tensor_Create_0014, Function | MediumTest | Level1) {
2614     printf("==========ReadFile==========\n");
2615     size_t size1;
2616     size_t *ptr_size1 = &size1;
2617     const char *imagePath = "/data/test/ml_face_isface.input";
2618     char *imageBuf = ReadFile(imagePath, ptr_size1);
2619     ASSERT_NE(imageBuf, nullptr);
2620     printf("==========OH_AI_TensorCreate==========\n");
2621     constexpr size_t create_shape_num = 4;
2622     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2623     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2624                                            create_shape_num, imageBuf, size1);
2625     ASSERT_NE(tensor, nullptr);
2626     size_t data_size = OH_AI_TensorGetDataSize(tensor);
2627     printf("Tensor data size: %zu.\n", data_size);
2628     ASSERT_EQ(data_size, 6912 * sizeof(float));
2629     delete[] imageBuf;
2630     OH_AI_TensorDestroy(&tensor);
2631 }
2632 
2633 // 正常场景:MSTensorGetMutableData,获取tensor可变数据指针
2634 HWTEST(MSLiteTest, OHOS_Tensor_Create_0015, Function | MediumTest | Level1) {
2635     printf("==========ReadFile==========\n");
2636     size_t size1;
2637     size_t *ptr_size1 = &size1;
2638     const char *imagePath = "/data/test/ml_face_isface.input";
2639     char *imageBuf = ReadFile(imagePath, ptr_size1);
2640     ASSERT_NE(imageBuf, nullptr);
2641     printf("==========OH_AI_TensorCreate==========\n");
2642     constexpr size_t create_shape_num = 4;
2643     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2644     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2645                                            create_shape_num, imageBuf, size1);
2646     ASSERT_NE(tensor, nullptr);
2647     float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
2648     ASSERT_NE(input_data, nullptr);
2649     delete[] imageBuf;
2650     OH_AI_TensorDestroy(&tensor);
2651 }
2652 
2653 // 正常场景:MSTensorClone,拷贝tensor
2654 HWTEST(MSLiteTest, OHOS_Tensor_Create_0016, Function | MediumTest | Level1) {
2655     printf("==========ReadFile==========\n");
2656     size_t size1;
2657     size_t *ptr_size1 = &size1;
2658     const char *imagePath = "/data/test/ml_face_isface.input";
2659     char *imageBuf = ReadFile(imagePath, ptr_size1);
2660     ASSERT_NE(imageBuf, nullptr);
2661     printf("==========OH_AI_TensorCreate==========\n");
2662     constexpr size_t create_shape_num = 4;
2663     int64_t create_shape[create_shape_num] = {1, 48, 48, 3};
2664     OH_AI_TensorHandle tensor = OH_AI_TensorCreate("data", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, create_shape,
2665                                            create_shape_num, imageBuf, size1);
2666     ASSERT_NE(tensor, nullptr);
2667     OH_AI_TensorHandle clone = OH_AI_TensorClone(tensor);
2668     ASSERT_NE(clone, nullptr);
2669     ASSERT_EQ(strcmp(OH_AI_TensorGetName(clone), "data_duplicate"), 0);
2670     delete[] imageBuf;
2671     OH_AI_TensorDestroy(&tensor);
2672     OH_AI_TensorDestroy(&clone);
2673 }
2674 
2675 // 正常场景:单输入模型
2676 HWTEST(MSLiteTest, OHOS_Input_0001, Function | MediumTest | Level1) {
2677     printf("==========Init Context==========\n");
2678     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2679     ASSERT_NE(context, nullptr);
2680     AddContextDeviceCPU(context);
2681     printf("==========Create model==========\n");
2682     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2683     ASSERT_NE(model, nullptr);
2684     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
2685 }
2686 
// Normal scenario: multi-input model. Builds the headpose model, fills all of
// its inputs (second FillInputsData argument false = multi-input naming), runs
// inference and compares outputs with a wider 0.02 tolerance.
HWTEST(MSLiteTest, OHOS_Input_0002, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_headpose_pb2tflite.ms",
        OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "ml_headpose_pb2tflite", false);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    // Looser tolerances (0.02/0.02) than the default for this multi-output model.
    CompareResult(outputs, "ml_headpose_pb2tflite", 0.02, 0.02);
    OH_AI_ModelDestroy(&model);
}
2712 
2713 // 正常场景:输入为uint8模型
2714 HWTEST(MSLiteTest, OHOS_Input_0003, Function | MediumTest | Level1) {
2715     printf("==========ReadFile==========\n");
2716     size_t size1;
2717     size_t *ptr_size1 = &size1;
2718     const char *imagePath = "/data/test/aiy_vision_classifier_plants_V1_3.input";
2719     char *imageBuf = ReadFile(imagePath, ptr_size1);
2720     ASSERT_NE(imageBuf, nullptr);
2721     printf("==========Init Context==========\n");
2722     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2723     ASSERT_NE(context, nullptr);
2724     AddContextDeviceCPU(context);
2725     printf("==========Create model==========\n");
2726     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2727     ASSERT_NE(model, nullptr);
2728     printf("==========Build model==========\n");
2729     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/aiy_vision_classifier_plants_V1_3.ms", OH_AI_MODELTYPE_MINDIR,
2730                                    context);
2731     printf("==========build model return code:%d\n", ret);
2732     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2733     printf("==========GetInputs==========\n");
2734     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2735     ASSERT_NE(inputs.handle_list, nullptr);
2736     for (size_t i = 0; i < inputs.handle_num; ++i) {
2737         OH_AI_TensorHandle tensor = inputs.handle_list[i];
2738         int64_t element_num = OH_AI_TensorGetElementNum(tensor);
2739         printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
2740         void *input_data = OH_AI_TensorGetMutableData(inputs.handle_list[i]);
2741         ASSERT_NE(input_data, nullptr);
2742         memcpy(input_data, imageBuf, size1);
2743     }
2744     printf("==========Model Predict==========\n");
2745     OH_AI_TensorHandleArray outputs;
2746     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
2747     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2748     printf("==========GetOutput==========\n");
2749     for (size_t i = 0; i < outputs.handle_num; ++i) {
2750         OH_AI_TensorHandle tensor = outputs.handle_list[i];
2751         int64_t element_num = OH_AI_TensorGetElementNum(tensor);
2752         printf("Tensor name: %s, elements num: %" PRId64 ".\n", OH_AI_TensorGetName(tensor), element_num);
2753         uint8_t *output_data = reinterpret_cast<uint8_t *>(OH_AI_TensorGetMutableData(tensor));
2754         printf("output data is:");
2755         for (int j = 0; j < element_num && j <= 20; ++j) {
2756             printf("%d ", output_data[j]);
2757         }
2758         printf("\n");
2759         printf("==========compFp32WithTData==========\n");
2760         string expectedDataFile = "/data/test/aiy_vision_classifier_plants_V1_3" + std::to_string(i) + ".output";
2761         bool result = compUint8WithTData(output_data, expectedDataFile, 0.01, 0.01, false);
2762         EXPECT_EQ(result, true);
2763     }
2764     delete[] imageBuf;
2765     OH_AI_ModelDestroy(&model);
2766 }
2767 
2768 // 正常场景:量化模型
2769 HWTEST(MSLiteTest, OHOS_Input_0004, Function | MediumTest | Level1) {
2770     printf("==========Init Context==========\n");
2771     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2772     ASSERT_NE(context, nullptr);
2773     AddContextDeviceCPU(context);
2774     printf("==========Create model==========\n");
2775     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2776     ASSERT_NE(model, nullptr);
2777     ModelPredict(model, context, "ml_face_isface_quant", {}, false, true, false);
2778 }
2779 
2780 // 正常场景:循环多次执行推理流程
2781 HWTEST(MSLiteTest, OHOS_Multiple_0001, Function | MediumTest | Level1) {
2782     for (size_t num = 0; num < 50; ++num) {
2783         Predict_CPU();
2784     }
2785 }
2786 
2787 // 异常场景:Model创建一次,Build多次
2788 HWTEST(MSLiteTest, OHOS_Multiple_0002, Function | MediumTest | Level1) {
2789     printf("==========Init Context==========\n");
2790     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2791     ASSERT_NE(context, nullptr);
2792     AddContextDeviceCPU(context);
2793     printf("==========Create model==========\n");
2794     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2795     ASSERT_NE(model, nullptr);
2796     printf("==========Build model==========\n");
2797     int ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2798     printf("==========build model return code:%d\n", ret);
2799     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2800     printf("==========Build model==========\n");
2801     int ret2 = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2802     printf("==========build model return code:%d\n", ret2);
2803     ASSERT_EQ(ret2, OH_AI_STATUS_LITE_MODEL_REBUILD);
2804     OH_AI_ModelDestroy(&model);
2805 }
2806 
// Normal scenario: create and build the model once, then run Predict 50 times
// on the same inputs; the final outputs must still match the expected data.
HWTEST(MSLiteTest, OHOS_Multiple_0003, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "ml_face_isface", true);
    OH_AI_TensorHandleArray outputs;
    // Inputs are filled once; repeated predictions must all succeed.
    for (size_t i = 0; i < 50; ++i) {
        printf("==========Model Predict==========\n");
        OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
        ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
    }
    // Only the outputs of the last iteration are compared.
    CompareResult(outputs, "ml_face_isface");
    OH_AI_ModelDestroy(&model);
}
2833 
2834 // 正常场景:多次创建和销毁Model
2835 HWTEST(MSLiteTest, OHOS_Multiple_0004, Function | MediumTest | Level1) {
2836     for (size_t i = 0; i < 50; ++i) {
2837         printf("==========Init Context==========\n");
2838         OH_AI_ContextHandle context = OH_AI_ContextCreate();
2839         ASSERT_NE(context, nullptr);
2840         AddContextDeviceCPU(context);
2841         printf("==========Create model==========\n");
2842         OH_AI_ModelHandle model = OH_AI_ModelCreate();
2843         ASSERT_NE(model, nullptr);
2844         printf("==========Build model==========\n");
2845         OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
2846         printf("==========build model return code:%d\n", ret);
2847         ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2848         printf("==========Build model==========\n");
2849         OH_AI_ModelDestroy(&model);
2850     }
2851 }
2852 
2853 // 正常场景:两个模型都在CPU上并行推理
2854 HWTEST(MSLiteTest, OHOS_Parallel_0001, Function | MediumTest | Level1) {
2855     std::cout << "run start" << std::endl;
2856     std::thread t1(Predict_CPU);
2857     std::cout << "1111111111111" << std::endl;
2858     std::thread t2(Predict_CPU);
2859     std::cout << "2222222222222" << std::endl;
2860     t1.join();
2861     t2.join();
2862 }
2863 
2864 // 正常场景:两个模型都在NPU上并行推理
2865 HWTEST(MSLiteTest, OHOS_Parallel_0002, Function | MediumTest | Level1) {
2866     std::cout << "run start" << std::endl;
2867     std::thread t1(Predict_NPU);
2868     std::cout << "1111111111111" << std::endl;
2869     std::thread t2(Predict_NPU);
2870     std::cout << "2222222222222" << std::endl;
2871     t1.join();
2872     t2.join();
2873 }
2874 
2875 // 正常场景:两个模型在CPU NPU上并行推理
2876 HWTEST(MSLiteTest, OHOS_Parallel_0003, Function | MediumTest | Level1) {
2877     std::cout << "run start" << std::endl;
2878     std::thread t1(Predict_CPU);
2879     std::cout << "1111111111111" << std::endl;
2880     std::thread t2(Predict_NPU);
2881     std::cout << "2222222222222" << std::endl;
2882     t1.join();
2883     t2.join();
2884 }
2885 
// Normal scenario: backward compatibility — a model converted with r1.3 must
// still build and run correctly on the r1.5 runtime.
HWTEST(MSLiteTest, OHOS_Compatible_0001, Function | MediumTest | Level1) {
    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceCPU(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    // The r1.3-converted model file; inputs/outputs are the same as the current one.
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_r13.ms",
        OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, "ml_face_isface");
    OH_AI_ModelDestroy(&model);
}
2911 
2912 
// Normal scenario: offline model on the NNRT backend, single-input model.
// Skipped entirely when no NNRT device is available on the test machine.
HWTEST(MSLiteTest, OHOS_OfflineModel_0001, Function | MediumTest | Level1) {
    if (!IsNNRTAvailable()) {
        printf("NNRt is not available, skip this test");
        return;
    }

    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceNNRT(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    // Offline (pre-compiled) model file targeted at the NNRT backend.
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_offline_model.ms",
        OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "ml_face_isface", true);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    CompareResult(outputs, "ml_face_isface");
    OH_AI_ModelDestroy(&model);
}
2943 
// Normal scenario: offline model on the NNRT backend, multi-input model.
// Skipped entirely when no NNRT device is available on the test machine.
HWTEST(MSLiteTest, OHOS_OfflineModel_0002, Function | MediumTest | Level1) {
    if (!IsNNRTAvailable()) {
        printf("NNRt is not available, skip this test");
        return;
    }

    printf("==========Init Context==========\n");
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    ASSERT_NE(context, nullptr);
    AddContextDeviceNNRT(context);
    printf("==========Create model==========\n");
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    ASSERT_NE(model, nullptr);
    printf("==========Build model==========\n");
    // Offline (pre-compiled) multi-input model targeted at the NNRT backend.
    OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_headpose_pb2tflite_offline_model.ms",
        OH_AI_MODELTYPE_MINDIR, context);
    printf("==========build model return code:%d\n", ret);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    printf("==========GetInputs==========\n");
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    ASSERT_NE(inputs.handle_list, nullptr);
    FillInputsData(inputs, "ml_headpose_pb2tflite", false);
    printf("==========Model Predict==========\n");
    OH_AI_TensorHandleArray outputs;
    ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
    ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
    // Looser tolerances (0.02/0.02) than the default for this multi-output model.
    CompareResult(outputs, "ml_headpose_pb2tflite", 0.02, 0.02);
    OH_AI_ModelDestroy(&model);
}
2974 
2975 
2976 // 正常场景:离线模型支持NNRT后端,Model创建一次,Build一次,Predict多次
2977 HWTEST(MSLiteTest, OHOS_OfflineModel_0004, Function | MediumTest | Level1) {
2978     if (!IsNNRTAvailable()) {
2979         printf("NNRt is not available, skip this test");
2980         return;
2981     }
2982 
2983     printf("==========Init Context==========\n");
2984     OH_AI_ContextHandle context = OH_AI_ContextCreate();
2985     ASSERT_NE(context, nullptr);
2986     AddContextDeviceNNRT(context);
2987     printf("==========Create model==========\n");
2988     OH_AI_ModelHandle model = OH_AI_ModelCreate();
2989     ASSERT_NE(model, nullptr);
2990     printf("==========Build model==========\n");
2991     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_offline_model.ms", OH_AI_MODELTYPE_MINDIR, context);
2992     printf("==========build model return code:%d\n", ret);
2993     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
2994     printf("==========GetInputs==========\n");
2995     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
2996     ASSERT_NE(inputs.handle_list, nullptr);
2997     FillInputsData(inputs, "ml_face_isface", true);
2998     OH_AI_TensorHandleArray outputs;
2999     for (size_t i = 0; i < 50; ++i) {
3000         printf("==========Model Predict==========\n");
3001         OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3002         ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3003     }
3004     CompareResult(outputs, "ml_face_isface");
3005     OH_AI_ModelDestroy(&model);
3006 }
3007 
3008 // 正常场景:离线模型支持NNRT后端,Model创建一次,Build多次
3009 HWTEST(MSLiteTest, OHOS_OfflineModel_0005, Function | MediumTest | Level1) {
3010     if (!IsNNRTAvailable()) {
3011         printf("NNRt is not available, skip this test");
3012         return;
3013     }
3014 
3015     printf("==========Init Context==========\n");
3016     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3017     ASSERT_NE(context, nullptr);
3018     AddContextDeviceNNRT(context);
3019     printf("==========Create model==========\n");
3020     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3021     ASSERT_NE(model, nullptr);
3022     printf("==========Build model==========\n");
3023     int ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_offline_model.ms", OH_AI_MODELTYPE_MINDIR, context);
3024     printf("==========build model return code:%d\n", ret);
3025     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3026     printf("==========Build model==========\n");
3027     int ret2 = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_offline_model.ms", OH_AI_MODELTYPE_MINDIR, context);
3028     printf("==========build model return code:%d\n", ret2);
3029     ASSERT_EQ(ret2, OH_AI_STATUS_SUCCESS);
3030     OH_AI_ModelDestroy(&model);
3031 }
3032 
3033 // 异常场景:离线模型支持NNRT后端,ModelPredict,input为空
3034 HWTEST(MSLiteTest, OHOS_OfflineModel_0006, Function | MediumTest | Level1) {
3035     if (!IsNNRTAvailable()) {
3036         printf("NNRt is not available, skip this test");
3037         return;
3038     }
3039 
3040     printf("==========Init Context==========\n");
3041     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3042     ASSERT_NE(context, nullptr);
3043     AddContextDeviceNNRT(context);
3044     printf("==========Create model==========\n");
3045     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3046     ASSERT_NE(model, nullptr);
3047     printf("==========Build model==========\n");
3048     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_offline_model.ms", OH_AI_MODELTYPE_MINDIR, context);
3049     printf("==========build model return code:%d\n", ret);
3050     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3051     printf("==========Model Predict==========\n");
3052     OH_AI_TensorHandleArray inputs;
3053     OH_AI_TensorHandleArray outputs;
3054     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3055     printf("==========Model Predict return code:%d\n", ret);
3056     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
3057     OH_AI_ModelDestroy(&model);
3058 }
3059 
3060 // 异常场景:非离线模型支持NNRT后端,ms模型未转换为NNRT后端模型
3061 HWTEST(MSLiteTest, OHOS_OfflineModel_0007, Function | MediumTest | Level1) {
3062     if (!IsNNRTAvailable()) {
3063         printf("NNRt is not available, skip this test");
3064         return;
3065     }
3066 
3067     printf("==========Init Context==========\n");
3068     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3069     ASSERT_NE(context, nullptr);
3070     AddContextDeviceNNRT(context);
3071     AddContextDeviceCPU(context);  // nnrt算子支持有限,加cpu设备走异构推理
3072     printf("==========Create model==========\n");
3073     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3074     ASSERT_NE(model, nullptr);
3075     ModelPredict(model, context, "ml_face_isface", {}, false, true, false);
3076 }
3077 
3078 // 正常场景:覆盖模型ml_ocr_cn
3079 HWTEST(MSLiteTest, OHOS_OfflineModel_0008, Function | MediumTest | Level1) {
3080     if (!IsNNRTAvailable()) {
3081         printf("NNRt is not available, skip this test");
3082         return;
3083     }
3084 
3085     printf("==========Init Context==========\n");
3086     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3087     ASSERT_NE(context, nullptr);
3088     AddContextDeviceNNRT(context);
3089     printf("==========Create model==========\n");
3090     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3091     ASSERT_NE(model, nullptr);
3092     printf("==========Build model==========\n");
3093     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn_offline_model.ms",
3094         OH_AI_MODELTYPE_MINDIR, context);
3095     printf("==========build model return code:%d\n", ret);
3096     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3097     printf("==========GetInputs==========\n");
3098     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3099     ASSERT_NE(inputs.handle_list, nullptr);
3100     FillInputsData(inputs, "ml_ocr_cn", false);
3101     printf("==========Model Predict==========\n");
3102     OH_AI_TensorHandleArray outputs;
3103     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3104     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3105     CompareResult(outputs, "ml_ocr_cn");
3106     OH_AI_ModelDestroy(&model);
3107 }
3108 
3109 // 正常场景:离线模型覆盖NPU
3110 HWTEST(MSLiteTest, OHOS_OfflineModel_0009, Function | MediumTest | Level1) {
3111     if (!IsNPU()) {
3112         printf("NNRt is not NPU, skip this test");
3113         return;
3114     }
3115 
3116     printf("==========Init Context==========\n");
3117     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3118     ASSERT_NE(context, nullptr);
3119     AddContextDeviceNNRT(context);
3120     printf("==========Create model==========\n");
3121     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3122     ASSERT_NE(model, nullptr);
3123     printf("==========Build model==========\n");
3124     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/tinynet.om.ms",
3125         OH_AI_MODELTYPE_MINDIR, context);
3126     printf("==========build model return code:%d\n", ret);
3127     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3128     printf("==========GetInputs==========\n");
3129     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3130     ASSERT_NE(inputs.handle_list, nullptr);
3131     for (size_t i = 0; i < inputs.handle_num; ++i) {
3132         OH_AI_TensorHandle tensor = inputs.handle_list[i];
3133         float *input_data = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(tensor));
3134         size_t element_num = OH_AI_TensorGetElementNum(tensor);
3135         std::random_device rd;
3136         std::mt19937 gen(rd());
3137         std::uniform_real_distribution<float> dis(0.0f,1.0f);
3138         for (int z=0;z<element_num;z++) {
3139             input_data[z] = dis(gen);
3140         }
3141     }
3142     printf("==========Model Predict==========\n");
3143     OH_AI_TensorHandleArray outputs;
3144     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3145     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3146     OH_AI_ModelDestroy(&model);
3147 }
3148 
3149 // 正常场景:delegate异构,使用低级接口创建nnrt device info,选取第一个NNRT设备
3150 HWTEST(MSLiteTest, OHOS_NNRT_0001, Function | MediumTest | Level1) {
3151     printf("==========Init Context==========\n");
3152     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3153     ASSERT_NE(context, nullptr);
3154     AddContextDeviceNNRT(context);
3155     AddContextDeviceCPU(context);
3156     printf("==========Create model==========\n");
3157     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3158     ASSERT_NE(model, nullptr);
3159     printf("==========Build model==========\n");
3160     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms",
3161         OH_AI_MODELTYPE_MINDIR, context);
3162     printf("==========build model return code:%d\n", ret);
3163     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3164     printf("==========GetInputs==========\n");
3165     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3166     ASSERT_NE(inputs.handle_list, nullptr);
3167     FillInputsData(inputs, "ml_face_isface", true);
3168     printf("==========Model Predict==========\n");
3169     OH_AI_TensorHandleArray outputs;
3170     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3171     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3172     CompareResult(outputs, "ml_face_isface");
3173     OH_AI_ModelDestroy(&model);
3174 }
3175 
3176 //  正常场景:delegate异构,使用高级接口创建nnrt device info,根据类型确定NNRT设备
3177 HWTEST(MSLiteTest, OHOS_NNRT_0002, Function | MediumTest | Level1) {
3178     printf("==========Init Context==========\n");
3179     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3180     ASSERT_NE(context, nullptr);
3181     AddContextDeviceNNRTByType(context);
3182     AddContextDeviceCPU(context);
3183     printf("==========Create model==========\n");
3184     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3185     ASSERT_NE(model, nullptr);
3186     printf("==========Build model==========\n");
3187     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms",
3188         OH_AI_MODELTYPE_MINDIR, context);
3189     printf("==========build model return code:%d\n", ret);
3190     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3191     printf("==========GetInputs==========\n");
3192     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3193     ASSERT_NE(inputs.handle_list, nullptr);
3194     FillInputsData(inputs, "ml_face_isface", true);
3195     printf("==========Model Predict==========\n");
3196     OH_AI_TensorHandleArray outputs;
3197     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3198     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3199     CompareResult(outputs, "ml_face_isface");
3200     OH_AI_ModelDestroy(&model);
3201 }
3202 
3203 // 正常场景:delegate异构,使用高级接口创建nnrt device info,根据名称确定NNRT设备
3204 HWTEST(MSLiteTest, OHOS_NNRT_0003, Function | MediumTest | Level1) {
3205     printf("==========Init Context==========\n");
3206     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3207     ASSERT_NE(context, nullptr);
3208     AddContextDeviceNNRTByName(context);
3209     AddContextDeviceCPU(context);
3210     printf("==========Create model==========\n");
3211     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3212     ASSERT_NE(model, nullptr);
3213     printf("==========Build model==========\n");
3214     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms",
3215         OH_AI_MODELTYPE_MINDIR, context);
3216     printf("==========build model return code:%d\n", ret);
3217     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3218     printf("==========GetInputs==========\n");
3219     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3220     ASSERT_NE(inputs.handle_list, nullptr);
3221     FillInputsData(inputs, "ml_face_isface", true);
3222     printf("==========Model Predict==========\n");
3223     OH_AI_TensorHandleArray outputs;
3224     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3225     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3226     CompareResult(outputs, "ml_face_isface");
3227     OH_AI_ModelDestroy(&model);
3228 }
3229 
3230 
3231 // 正常场景:delegate异构,设置NNRT扩展选项,包括cache路径
3232 HWTEST(MSLiteTest, OHOS_NNRT_0005, Function | MediumTest | Level1) {
3233     printf("==========Init Context==========\n");
3234     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3235     ASSERT_NE(context, nullptr);
3236     AddContextDeviceNNRTWithCache(context, "/data/local/tmp/", "1");
3237     AddContextDeviceCPU(context);
3238 
3239     printf("==========Create model==========\n");
3240     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3241     ASSERT_NE(model, nullptr);
3242 
3243     printf("==========Build model==========\n");
3244     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms",
3245         OH_AI_MODELTYPE_MINDIR, context);
3246     printf("==========build model return code:%d\n", ret);
3247     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3248 
3249     printf("==========GetInputs==========\n");
3250     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3251     ASSERT_NE(inputs.handle_list, nullptr);
3252     FillInputsData(inputs, "ml_face_isface", true);
3253 
3254     printf("==========Model Predict==========\n");
3255     OH_AI_TensorHandleArray outputs;
3256     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3257     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3258     CompareResult(outputs, "ml_face_isface");
3259     OH_AI_ModelDestroy(&model);
3260 }
3261 
3262 
3263 // 正常场景:通过OH_AI_ModelGetInputs和OH_AI_ModelGetOutputs获取,实现数据免拷贝
3264 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_copy_free_0001, Function | MediumTest | Level1) {
3265     if (!IsNPU()) {
3266         printf("NNRt is not NPU, skip this test");
3267         return;
3268     }
3269     printf("==========Init Context==========\n");
3270     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3271     AddContextDeviceNNRT(context);
3272     printf("==========Build model==========\n");
3273     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3274     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3275     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3276     printf("==========GetInputs==========\n");
3277     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3278     ASSERT_NE(inputs.handle_list, nullptr);
3279     FillInputsData(inputs, "ml_face_isface", true);
3280     printf("==========Model Predict==========\n");
3281     OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
3282     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3283     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3284     CompareResult(outputs, "ml_face_isface");
3285     OH_AI_ModelDestroy(&model);
3286 }
3287 
RunMSLiteModel(OH_AI_ModelHandle model,string model_name,bool is_transpose)3288 void RunMSLiteModel(OH_AI_ModelHandle model, string model_name, bool is_transpose) {
3289     const size_t MAX_DIMS = 10;
3290     int64_t shape[MAX_DIMS];
3291     size_t shape_num;
3292     OH_AI_TensorHandleArray in_tensor_array;
3293     OH_AI_TensorHandleArray out_tensor_array;
3294     printf("==========OH_AI_TensorSetAllocator in_tensor==========\n");
3295     OH_AI_TensorHandleArray inputs_handle = OH_AI_ModelGetInputs(model);
3296     in_tensor_array.handle_num = inputs_handle.handle_num;
3297     in_tensor_array.handle_list = (OH_AI_TensorHandle *)malloc(sizeof(OH_AI_TensorHandle) * in_tensor_array.handle_num);
3298     for (size_t i = 0; i < inputs_handle.handle_num; i++) {
3299         auto ori_tensor = inputs_handle.handle_list[i];
3300         auto shape_ptr = OH_AI_TensorGetShape(ori_tensor, &shape_num);
3301         for (size_t j = 0; j < shape_num; j++) {
3302         shape[j] = shape_ptr[j];
3303         }
3304         void *in_allocator = OH_AI_TensorGetAllocator(ori_tensor);
3305         OH_AI_TensorHandle in_tensor = OH_AI_TensorCreate(OH_AI_TensorGetName(ori_tensor), OH_AI_TensorGetDataType(ori_tensor),
3306                                         shape, shape_num, nullptr, 0);
3307         OH_AI_TensorSetAllocator(in_tensor, in_allocator);
3308         in_tensor_array.handle_list[i] = in_tensor;
3309     }
3310     printf("==========FillInputsData==========\n");
3311     FillInputsData(in_tensor_array, model_name, is_transpose);
3312     printf("==========OH_AI_TensorSetAllocator out_tensor==========\n");
3313     OH_AI_TensorHandleArray outputs_handle = OH_AI_ModelGetOutputs(model);
3314     out_tensor_array.handle_num = outputs_handle.handle_num;
3315     out_tensor_array.handle_list = (OH_AI_TensorHandle *)malloc(sizeof(OH_AI_TensorHandle) * out_tensor_array.handle_num);
3316     for (size_t i = 0; i < outputs_handle.handle_num; i++) {
3317         auto ori_tensor = outputs_handle.handle_list[i];
3318         auto shape_ptr = OH_AI_TensorGetShape(ori_tensor, &shape_num);
3319         for (size_t j = 0; j < shape_num; j++) {
3320         shape[j] = shape_ptr[j];
3321         }
3322         void *in_allocator = OH_AI_TensorGetAllocator(ori_tensor);
3323         OH_AI_TensorHandle out_tensor = OH_AI_TensorCreate(OH_AI_TensorGetName(ori_tensor), OH_AI_TensorGetDataType(ori_tensor),
3324                                         shape, shape_num, nullptr, 0);
3325         OH_AI_TensorSetAllocator(out_tensor, in_allocator);
3326         out_tensor_array.handle_list[i] = out_tensor;
3327     }
3328     printf("==========OH_AI_ModelPredict==========\n");
3329     auto ret = OH_AI_ModelPredict(model, in_tensor_array, &out_tensor_array, NULL, NULL);
3330     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3331     printf("==========OH_AI_TensorDestroy==========\n");
3332     CompareResult(out_tensor_array, model_name);
3333     for (size_t i = 0; i < in_tensor_array.handle_num; i++) {
3334         auto ori_tensor = in_tensor_array.handle_list[i];
3335         OH_AI_TensorDestroy(&ori_tensor);
3336     }
3337     free(in_tensor_array.handle_list);
3338     for (size_t i = 0; i < out_tensor_array.handle_num; i++) {
3339         auto ori_tensor = out_tensor_array.handle_list[i];
3340         OH_AI_TensorDestroy(&ori_tensor);
3341     }
3342     free(out_tensor_array.handle_list);
3343 }
3344 
3345 // 正常场景:通过OH_AI_TensorCreate创建输入输出tensor,实现数据免拷贝
3346 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_copy_free_0002, Function | MediumTest | Level1) {
3347     if (!IsNPU()) {
3348         printf("NNRt is not NPU, skip this test");
3349         return;
3350     }
3351     printf("==========Init Context==========\n");
3352     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3353     AddContextDeviceNNRT(context);
3354     printf("==========Build model==========\n");
3355     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3356     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3357     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3358     RunMSLiteModel(model, "ml_face_isface", true);
3359     printf("==========OH_AI_ModelDestroy==========\n");
3360     OH_AI_ModelDestroy(&model);
3361 }
3362 
3363 // 异常场景:OH_AI_TensorSetAllocator设置空指针
3364 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_copy_free_0003, Function | MediumTest | Level1) {
3365     if (!IsNPU()) {
3366         printf("NNRt is not NPU, skip this test");
3367         return;
3368     }
3369     printf("==========Init Context==========\n");
3370     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3371     AddContextDeviceNNRT(context);
3372     printf("==========Build model==========\n");
3373     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3374     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3375     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3376     printf("==========GetInputs==========\n");
3377     const size_t MAX_DIMS = 10;
3378     int64_t shape[MAX_DIMS];
3379     size_t shape_num;
3380     OH_AI_TensorHandleArray in_tensor_array;
3381     OH_AI_TensorHandleArray inputs_handle = OH_AI_ModelGetInputs(model);
3382     in_tensor_array.handle_num = inputs_handle.handle_num;
3383     in_tensor_array.handle_list = (OH_AI_TensorHandle *)malloc(sizeof(OH_AI_TensorHandle) * in_tensor_array.handle_num);
3384     for (size_t i = 0; i < inputs_handle.handle_num; i++) {
3385         auto ori_tensor = inputs_handle.handle_list[i];
3386         auto shape_ptr = OH_AI_TensorGetShape(ori_tensor, &shape_num);
3387         for (size_t j = 0; j < shape_num; j++) {
3388         shape[j] = shape_ptr[j];
3389         }
3390         OH_AI_TensorHandle in_tensor = OH_AI_TensorCreate(OH_AI_TensorGetName(ori_tensor),
3391                                         OH_AI_TensorGetDataType(ori_tensor), shape, shape_num, nullptr, 0);
3392         OH_AI_Status ret = OH_AI_TensorSetAllocator(in_tensor, nullptr);
3393         ASSERT_NE(ret, OH_AI_STATUS_SUCCESS);
3394         in_tensor_array.handle_list[i] = in_tensor;
3395     }
3396 }
3397 
3398 // 正常场景:通过OH_AI_TensorCreate创建输入输出tensor,实现数据免拷贝, CPU后端场景
3399 HWTEST(MSLiteTest, SUB_AI_MindSpore_CPU_copy_free_0001, Function | MediumTest | Level1) {
3400     printf("==========Init Context==========\n");
3401     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3402     AddContextDeviceCPU(context);
3403     printf("==========Build model==========\n");
3404     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3405     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3406     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3407     RunMSLiteModel(model, "ml_face_isface", true);
3408     printf("==========OH_AI_ModelDestroy==========\n");
3409     OH_AI_ModelDestroy(&model);
3410 }
3411 
3412 
3413 // 正常场景:npu循环推理
3414 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_copy_free_0004, Function | MediumTest | Level1) {
3415     if (!IsNPU()) {
3416         printf("NNRt is not NPU, skip this test");
3417         return;
3418     }
3419     printf("==========Init Context==========\n");
3420     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3421     AddContextDeviceNNRT(context);
3422     printf("==========Build model==========\n");
3423     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3424     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3425     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3426     printf("==========GetInputs==========\n");
3427     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3428     ASSERT_NE(inputs.handle_list, nullptr);
3429     FillInputsData(inputs, "ml_face_isface", true);
3430     for (size_t i = 0; i < 50; ++i) {
3431         printf("==========Model Predict==========\n");
3432         OH_AI_TensorHandleArray outputs;
3433         ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3434         ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3435         CompareResult(outputs, "ml_face_isface");
3436     }
3437     OH_AI_ModelDestroy(&model);
3438 }
3439 
3440 // 正常场景:npu免拷贝场景循环推理
3441 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_copy_free_0005, Function | MediumTest | Level1) {
3442     if (!IsNPU()) {
3443         printf("NNRt is not NPU, skip this test");
3444         return;
3445     }
3446     printf("==========Init Context==========\n");
3447     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3448     AddContextDeviceNNRT(context);
3449     printf("==========Build model==========\n");
3450     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3451     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3452     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3453     const size_t MAX_DIMS = 10;
3454     int64_t shape[MAX_DIMS];
3455     size_t shape_num;
3456     OH_AI_TensorHandleArray in_tensor_array;
3457     OH_AI_TensorHandleArray out_tensor_array;
3458     printf("==========OH_AI_TensorSetAllocator in_tensor==========\n");
3459     OH_AI_TensorHandleArray inputs_handle = OH_AI_ModelGetInputs(model);
3460     in_tensor_array.handle_num = inputs_handle.handle_num;
3461     in_tensor_array.handle_list = (OH_AI_TensorHandle *)malloc(sizeof(OH_AI_TensorHandle) * in_tensor_array.handle_num);
3462     for (size_t i = 0; i < inputs_handle.handle_num; i++) {
3463         auto ori_tensor = inputs_handle.handle_list[i];
3464         auto shape_ptr = OH_AI_TensorGetShape(ori_tensor, &shape_num);
3465         for (size_t j = 0; j < shape_num; j++) {
3466         shape[j] = shape_ptr[j];
3467         }
3468         void *in_allocator = OH_AI_TensorGetAllocator(ori_tensor);
3469         OH_AI_TensorHandle in_tensor = OH_AI_TensorCreate(OH_AI_TensorGetName(ori_tensor), OH_AI_TensorGetDataType(ori_tensor),
3470                                         shape, shape_num, nullptr, 0);
3471         OH_AI_TensorSetAllocator(in_tensor, in_allocator);
3472         in_tensor_array.handle_list[i] = in_tensor;
3473     }
3474     printf("==========FillInputsData==========\n");
3475     FillInputsData(in_tensor_array, "ml_face_isface", true);
3476     printf("==========OH_AI_TensorSetAllocator out_tensor==========\n");
3477     OH_AI_TensorHandleArray outputs_handle = OH_AI_ModelGetOutputs(model);
3478     out_tensor_array.handle_num = outputs_handle.handle_num;
3479     out_tensor_array.handle_list = (OH_AI_TensorHandle *)malloc(sizeof(OH_AI_TensorHandle) * out_tensor_array.handle_num);
3480     for (size_t i = 0; i < outputs_handle.handle_num; i++) {
3481         auto ori_tensor = outputs_handle.handle_list[i];
3482         auto shape_ptr = OH_AI_TensorGetShape(ori_tensor, &shape_num);
3483         for (size_t j = 0; j < shape_num; j++) {
3484         shape[j] = shape_ptr[j];
3485         }
3486         void *in_allocator = OH_AI_TensorGetAllocator(ori_tensor);
3487         OH_AI_TensorHandle out_tensor = OH_AI_TensorCreate(OH_AI_TensorGetName(ori_tensor), OH_AI_TensorGetDataType(ori_tensor),
3488                                         shape, shape_num, nullptr, 0);
3489         OH_AI_TensorSetAllocator(out_tensor, in_allocator);
3490         out_tensor_array.handle_list[i] = out_tensor;
3491     }
3492     for (size_t i = 0; i < 50; ++i) {
3493         printf("==========OH_AI_ModelPredict==========\n");
3494         auto ret = OH_AI_ModelPredict(model, in_tensor_array, &out_tensor_array, NULL, NULL);
3495         ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3496         CompareResult(out_tensor_array, "ml_face_isface");
3497     }
3498     printf("==========OH_AI_TensorDestroy==========\n");
3499     for (size_t i = 0; i < in_tensor_array.handle_num; i++) {
3500         auto ori_tensor = in_tensor_array.handle_list[i];
3501         OH_AI_TensorDestroy(&ori_tensor);
3502     }
3503     free(in_tensor_array.handle_list);
3504     for (size_t i = 0; i < out_tensor_array.handle_num; i++) {
3505         auto ori_tensor = out_tensor_array.handle_list[i];
3506         OH_AI_TensorDestroy(&ori_tensor);
3507     }
3508     free(out_tensor_array.handle_list);
3509     printf("==========OH_AI_ModelDestroy==========\n");
3510     OH_AI_ModelDestroy(&model);
3511 }
3512 
3513 // 正常场景:NPU权重量化模型
3514 HWTEST(MSLiteTest, OHOS_NNRT_QUANT_0001, Function | MediumTest | Level1) {
3515     if (!IsNPU()) {
3516         printf("NNRt is not NPU, skip this test");
3517         return;
3518     }
3519     printf("==========Init Context==========\n");
3520     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3521     ASSERT_NE(context, nullptr);
3522     AddContextDeviceNNRT(context);
3523     printf("==========Create model==========\n");
3524     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3525     ASSERT_NE(model, nullptr);
3526     ModelPredict(model, context, "ml_face_isface_quant", {}, false, true, false);
3527 }
3528 
3529 
3530 // add nnrt hiai device info
AddContextDeviceHIAI(OH_AI_ContextHandle context)3531 void AddContextDeviceHIAI(OH_AI_ContextHandle context) {
3532     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3533     size_t num = 0;
3534     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3535     std::cout << "found " << num << " nnrt devices" << std::endl;
3536     NNRTDeviceDesc *desc_1 = nullptr;
3537     for (size_t i = 0; i < num; i++) {
3538         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3539         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3540         if (strcmp(name, "HIAI_F") == 0) {
3541             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3542         }
3543     }
3544 
3545     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3546     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3547     const char *band_mode = "HIAI_BANDMODE_HIGH";
3548     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3549 
3550     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3551 }
3552 
3553 // 异常场景:HIAI流程,离线模型支持NNRT后端,Model创建一次,Build多次
3554 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0004, Function | MediumTest | Level1) {
3555     if (!IsNPU()) {
3556         printf("NNRt is not NPU, skip this test");
3557         return;
3558     }
3559     printf("==========Init Context==========\n");
3560     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3561     AddContextDeviceHIAI(context);
3562     printf("==========Create model==========\n");
3563     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3564     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3565     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3566     printf("==========Build model==========\n");
3567     OH_AI_Status ret2 = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3568     ASSERT_EQ(ret2, OH_AI_STATUS_LITE_MODEL_REBUILD);
3569     OH_AI_ModelDestroy(&model);
3570 }
3571 
3572 // 异常场景:HIAI流程,离线模型支持NNRT后端,ModelPredict,input为空
3573 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0005, Function | MediumTest | Level1) {
3574     if (!IsNPU()) {
3575         printf("NNRt is not NPU, skip this test");
3576         return;
3577     }
3578     printf("==========Init Context==========\n");
3579     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3580     AddContextDeviceHIAI(context);
3581     printf("==========Create model==========\n");
3582     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3583     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3584     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3585     printf("==========Model Predict==========\n");
3586     OH_AI_TensorHandleArray inputs;
3587     OH_AI_TensorHandleArray outputs;
3588     ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3589     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
3590     OH_AI_ModelDestroy(&model);
3591 }
3592 
3593 // 异常场景:HIAI流程,非离线模型支持NNRT后端,ms模型未转换为NNRT后端模型
3594 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0006, Function | MediumTest | Level1) {
3595     if (!IsNPU()) {
3596         printf("NNRt is not NPU, skip this test");
3597         return;
3598     }
3599     printf("==========Init Context==========\n");
3600     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3601     AddContextDeviceHIAI(context);
3602     printf("==========Create model==========\n");
3603     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3604     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3605     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
3606 }
3607 
3608 // 正常场景:HIAI流程,离线模型配置量化参数
3609 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0007, Function | MediumTest | Level1) {
3610     if (!IsNPU()) {
3611         printf("NNRt is not NPU, skip this test");
3612         return;
3613     }
3614     printf("==========Init Context==========\n");
3615     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3616     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3617     size_t num = 0;
3618     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3619     std::cout << "found " << num << " nnrt devices" << std::endl;
3620     NNRTDeviceDesc *desc_1 = nullptr;
3621     for (size_t i = 0; i < num; i++) {
3622         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3623         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3624         if (strcmp(name, "HIAI_F") == 0) {
3625             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3626         }
3627     }
3628 
3629     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3630     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3631     const char *band_mode = "HIAI_BANDMODE_HIGH";
3632     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3633     size_t q_size;
3634     char *quant_config = ReadFile("/data/test/test_model_param", &q_size);
3635     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, q_size);
3636 
3637     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3638     printf("==========Create model==========\n");
3639     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3640     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3641     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3642     printf("==========GetInputs==========\n");
3643     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3644     ASSERT_NE(inputs.handle_list, nullptr);
3645     FillInputsData(inputs, "test_model", false);
3646     printf("==========Model Predict==========\n");
3647     OH_AI_TensorHandleArray outputs;
3648     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3649     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3650     CompareResult(outputs, "test_model", 0.01, 0.01, true);
3651     OH_AI_ModelDestroy(&model);
3652 }
3653 
3654 // 正常场景:HIAI流程,设置量化配置QuantConfigData为空指针时等于不量化
3655 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0008, Function | MediumTest | Level1) {
3656     if (!IsNPU()) {
3657         printf("NNRt is not NPU, skip this test");
3658         return;
3659     }
3660     printf("==========Init Context==========\n");
3661     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3662     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3663     size_t num = 0;
3664     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3665     std::cout << "found " << num << " nnrt devices" << std::endl;
3666     NNRTDeviceDesc *desc_1 = nullptr;
3667     for (size_t i = 0; i < num; i++) {
3668         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3669         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3670         if (strcmp(name, "HIAI_F") == 0) {
3671             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3672         }
3673     }
3674 
3675     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3676     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3677     const char *band_mode = "HIAI_BANDMODE_HIGH";
3678     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3679     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", nullptr, 0);
3680 
3681     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3682     printf("==========Create model==========\n");
3683     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3684     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3685     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3686     printf("==========GetInputs==========\n");
3687     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3688     ASSERT_NE(inputs.handle_list, nullptr);
3689     FillInputsData(inputs, "test_model", false);
3690     printf("==========Model Predict==========\n");
3691     OH_AI_TensorHandleArray outputs;
3692     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3693     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3694     CompareResult(outputs, "test_model", 0.01, 0.01, true);
3695     OH_AI_ModelDestroy(&model);
3696 }
3697 
3698 // 异常场景:HIAI流程,设置量化配置QuantConfigData为错误配置文件
3699 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0009, Function | MediumTest | Level1) {
3700     if (!IsNPU()) {
3701         printf("NNRt is not NPU, skip this test");
3702         return;
3703     }
3704     printf("==========Init Context==========\n");
3705     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3706     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3707     size_t num = 0;
3708     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3709     std::cout << "found " << num << " nnrt devices" << std::endl;
3710     NNRTDeviceDesc *desc_1 = nullptr;
3711     for (size_t i = 0; i < num; i++) {
3712         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3713         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3714         if (strcmp(name, "HIAI_F") == 0) {
3715             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3716         }
3717     }
3718 
3719     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3720     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3721     const char *band_mode = "HIAI_BANDMODE_HIGH";
3722     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3723     size_t q_size;
3724     char *quant_config = ReadFile("/data/test/test_model.om.ms", &q_size);
3725     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, q_size);
3726 
3727     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3728     printf("==========Create model==========\n");
3729     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3730     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3731     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
3732 }
3733 
3734 // 异常场景:HIAI流程,设置量化q_size为异常值
3735 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0010, Function | MediumTest | Level1) {
3736     if (!IsNPU()) {
3737         printf("NNRt is not NPU, skip this test");
3738         return;
3739     }
3740     printf("==========Init Context==========\n");
3741     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3742     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3743     size_t num = 0;
3744     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3745     std::cout << "found " << num << " nnrt devices" << std::endl;
3746     NNRTDeviceDesc *desc_1 = nullptr;
3747     for (size_t i = 0; i < num; i++) {
3748         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3749         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3750         if (strcmp(name, "HIAI_F") == 0) {
3751             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3752         }
3753     }
3754 
3755     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3756     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3757     const char *band_mode = "HIAI_BANDMODE_HIGH";
3758     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3759     size_t q_size;
3760     char *quant_config = ReadFile("/data/test/test_model_param", &q_size);
3761     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, 0);
3762 
3763     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3764     printf("==========Create model==========\n");
3765     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3766     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3767     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
3768 }
3769 
3770 // 正常场景:HIAI流程,设置 NPU 和外围输入/输出(I/O)设备的带宽模式BandMode模式为HIAI_BANDMODE_NORMAL
3771 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0011, Function | MediumTest | Level1) {
3772     if (!IsNPU()) {
3773         printf("NNRt is not NPU, skip this test");
3774         return;
3775     }
3776     printf("==========Init Context==========\n");
3777     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3778     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3779     size_t num = 0;
3780     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3781     std::cout << "found " << num << " nnrt devices" << std::endl;
3782     NNRTDeviceDesc *desc_1 = nullptr;
3783     for (size_t i = 0; i < num; i++) {
3784         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3785         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3786         if (strcmp(name, "HIAI_F") == 0) {
3787             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3788         }
3789     }
3790 
3791     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3792     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3793     const char *band_mode = "HIAI_BANDMODE_NORMAL";
3794     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3795     size_t q_size;
3796     char *quant_config = ReadFile("/data/test/test_model_param", &q_size);
3797     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, q_size);
3798 
3799     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3800     printf("==========Create model==========\n");
3801     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3802     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3803     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3804     printf("==========GetInputs==========\n");
3805     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3806     ASSERT_NE(inputs.handle_list, nullptr);
3807     FillInputsData(inputs, "test_model", false);
3808     printf("==========Model Predict==========\n");
3809     OH_AI_TensorHandleArray outputs;
3810     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3811     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3812     CompareResult(outputs, "test_model", 0.01, 0.01, true);
3813     OH_AI_ModelDestroy(&model);
3814 }
3815 
3816 // 正常场景:HIAI流程,设置 NPU 和外围输入/输出(I/O)设备的带宽模式BandMode模式为HIAI_BANDMODE_LOW
3817 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0012, Function | MediumTest | Level1) {
3818     if (!IsNPU()) {
3819         printf("NNRt is not NPU, skip this test");
3820         return;
3821     }
3822     printf("==========Init Context==========\n");
3823     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3824     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3825     size_t num = 0;
3826     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3827     std::cout << "found " << num << " nnrt devices" << std::endl;
3828     NNRTDeviceDesc *desc_1 = nullptr;
3829     for (size_t i = 0; i < num; i++) {
3830         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3831         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3832         if (strcmp(name, "HIAI_F") == 0) {
3833             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3834         }
3835     }
3836 
3837     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3838     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3839     const char *band_mode = "HIAI_BANDMODE_LOW";
3840     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3841     size_t q_size;
3842     char *quant_config = ReadFile("/data/test/test_model_param", &q_size);
3843     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, q_size);
3844 
3845     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3846     printf("==========Create model==========\n");
3847     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3848     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3849     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3850     printf("==========GetInputs==========\n");
3851     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3852     ASSERT_NE(inputs.handle_list, nullptr);
3853     FillInputsData(inputs, "test_model", false);
3854     printf("==========Model Predict==========\n");
3855     OH_AI_TensorHandleArray outputs;
3856     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3857     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3858     CompareResult(outputs, "test_model", 0.01, 0.01, true);
3859     OH_AI_ModelDestroy(&model);
3860 }
3861 
3862 // 正常场景:HIAI流程,设置 NPU 和外围输入/输出(I/O)设备的带宽模式BandMode模式为HIAI_BANDMODE_UNSET
3863 HWTEST(MSLiteTest, SUB_AI_MindSpore_HIAI_OfflineModel_0013, Function | MediumTest | Level1) {
3864     if (!IsNPU()) {
3865         printf("NNRt is not NPU, skip this test");
3866         return;
3867     }
3868     printf("==========Init Context==========\n");
3869     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3870     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3871     size_t num = 0;
3872     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3873     std::cout << "found " << num << " nnrt devices" << std::endl;
3874     NNRTDeviceDesc *desc_1 = nullptr;
3875     for (size_t i = 0; i < num; i++) {
3876         auto desc = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3877         auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc);
3878         if (strcmp(name, "HIAI_F") == 0) {
3879             desc_1 = OH_AI_GetElementOfNNRTDeviceDescs(descs, i);
3880         }
3881     }
3882 
3883     auto id_1 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_1);
3884     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_1);
3885     const char *band_mode = "HIAI_BANDMODE_UNSET";
3886     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "BandMode", band_mode, strlen(band_mode));
3887     size_t q_size;
3888     char *quant_config = ReadFile("/data/test/test_model_param", &q_size);
3889     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "QuantConfigData", quant_config, q_size);
3890 
3891     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3892     printf("==========Create model==========\n");
3893     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3894     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/test_model.om.ms", OH_AI_MODELTYPE_MINDIR, context);
3895     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3896     printf("==========GetInputs==========\n");
3897     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3898     ASSERT_NE(inputs.handle_list, nullptr);
3899     FillInputsData(inputs, "test_model", false);
3900     printf("==========Model Predict==========\n");
3901     OH_AI_TensorHandleArray outputs;
3902     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3903     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3904     CompareResult(outputs, "test_model", 0.01, 0.01, true);
3905     OH_AI_ModelDestroy(&model);
3906 }
3907 
3908 
// Log the current process's resident-set size (the VmRSS line of
// /proc/<pid>/status), prefixed with the caller-supplied `position` tag.
// Silently does nothing when the status file cannot be opened.
void PrintMem(const std::string &position) {
  const std::string status_path = "/proc/" + std::to_string(getpid()) + "/status";
  std::ifstream status_file(status_path);
  if (!status_file.good()) {
    return;  // e.g. non-procfs platform; nothing to report
  }
  for (std::string line; std::getline(status_file, line);) {
    if (line.find("VmRSS") != std::string::npos) {
      std::cout << position << " mem size: " << line << std::endl;
    }
  }
}
3922 
3923 
3924 // 正常场景:context配置cache信息,执行推理流程
3925 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0001, Function | MediumTest | Level1) {
3926     if (!IsNPU()) {
3927         printf("NNRt is not NPU, skip this test");
3928         return;
3929     }
3930     printf("==========OH_AI_ContextCreate==========\n");
3931     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3932     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3933     size_t num = 0;
3934     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3935     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
3936     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
3937     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
3938     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
3939     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
3940     const char *cache_path = "/data/local/tmp";
3941     const char *cache_version = "1";
3942     const char *model_name = "cache_model";
3943     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
3944     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
3945     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
3946     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3947     printf("==========OH_AI_ModelCreate==========\n");
3948     PrintMem("before build");
3949     uint64_t timeStartPrepare = getTimeInUs();
3950     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3951     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
3952     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3953     uint64_t timeEndPrepare = getTimeInUs();
3954     float init_session_time_once = (timeEndPrepare - timeStartPrepare) / 1000.0;
3955     std::cout << "init_session_time_once: " << init_session_time_once << std::endl;
3956     PrintMem("after build");
3957     printf("==========GetInputs==========\n");
3958     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3959     ASSERT_NE(inputs.handle_list, nullptr);
3960     FillInputsData(inputs, "ml_face_isface", true);
3961     printf("==========Model Predict==========\n");
3962     OH_AI_TensorHandleArray outputs;
3963     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
3964     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
3965     CompareResult(outputs, "ml_face_isface", 0.01, 0.01, true);
3966     OH_AI_ModelDestroy(&model);
3967 }
3968 
3969 // 正常场景:context配置cache信息,量化模型执行推理流程
3970 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0002, Function | MediumTest | Level1) {
3971     if (!IsNPU()) {
3972         printf("NNRt is not NPU, skip this test");
3973         return;
3974     }
3975     printf("==========OH_AI_ContextCreate==========\n");
3976     OH_AI_ContextHandle context = OH_AI_ContextCreate();
3977     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
3978     size_t num = 0;
3979     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
3980     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
3981     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
3982     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
3983     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
3984     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
3985     const char *cache_path = "/data/local/tmp";
3986     const char *cache_version = "1";
3987     const char *model_name = "cache_model_quant";
3988     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
3989     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
3990     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
3991     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
3992     printf("==========OH_AI_ModelCreate==========\n");
3993     OH_AI_ModelHandle model = OH_AI_ModelCreate();
3994     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface_quant.ms", OH_AI_MODELTYPE_MINDIR, context);
3995     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
3996     printf("==========GetInputs==========\n");
3997     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
3998     ASSERT_NE(inputs.handle_list, nullptr);
3999     FillInputsData(inputs, "ml_face_isface_quant", true);
4000     printf("==========Model Predict==========\n");
4001     OH_AI_TensorHandleArray outputs;
4002     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
4003     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
4004     CompareResult(outputs, "ml_face_isface_quant", 0.01, 0.01, true);
4005     OH_AI_ModelDestroy(&model);
4006 }
4007 
4008 // 正常场景:多个不同模型在同一路径下缓存,执行推理流程
4009 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0003, Function | MediumTest | Level1) {
4010     if (!IsNPU()) {
4011         printf("NNRt is not NPU, skip this test");
4012         return;
4013     }
4014     printf("==========OH_AI_ContextCreate==========\n");
4015     OH_AI_ContextHandle context = OH_AI_ContextCreate();
4016     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4017     size_t num = 0;
4018     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
4019     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
4020     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
4021     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
4022     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
4023     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
4024     const char *cache_path = "/data/local/tmp";
4025     const char *cache_version = "1";
4026     const char *model_name = "cache_a";
4027     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
4028     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
4029     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
4030     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
4031     printf("==========OH_AI_ModelCreate==========\n");
4032     OH_AI_ModelHandle model = OH_AI_ModelCreate();
4033     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context);
4034     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
4035     printf("==========OH_AI_ContextCreate2==========\n");
4036     OH_AI_ContextHandle context2 = OH_AI_ContextCreate();
4037     auto nnrt_device_info2 = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4038     size_t num2 = 0;
4039     auto descs2 = OH_AI_GetAllNNRTDeviceDescs(&num2);
4040     auto desc2_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs2, 0);
4041     auto name2 = OH_AI_GetNameFromNNRTDeviceDesc(desc2_0);
4042     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name2 << std::endl;
4043     auto id2_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc2_0);
4044     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info2, id2_0);
4045     const char *cache_path2 = "/data/local/tmp";
4046     const char *cache_version2 = "1";
4047     const char *model_name2 = "cache_b";
4048     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "CachePath", cache_path2, strlen(cache_path2));
4049     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "CacheVersion", cache_version2, strlen(cache_version2));
4050     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "ModelName", model_name2, strlen(model_name2));
4051     OH_AI_ContextAddDeviceInfo(context2, nnrt_device_info2);
4052     printf("==========OH_AI_ModelCreate2==========\n");
4053     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
4054     OH_AI_Status ret2 = OH_AI_ModelBuildFromFile(model2, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context2);
4055     ASSERT_EQ(ret2, OH_AI_STATUS_SUCCESS);
4056     printf("==========GetInputs==========\n");
4057     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
4058     ASSERT_NE(inputs.handle_list, nullptr);
4059     FillInputsData(inputs, "ml_ocr_cn", true);
4060     printf("==========Model Predict==========\n");
4061     OH_AI_TensorHandleArray outputs;
4062     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
4063     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
4064     CompareResult(outputs, "ml_ocr_cn", 0.01, 0.01, true);
4065     OH_AI_ModelDestroy(&model);
4066     printf("==========GetInputs2==========\n");
4067     OH_AI_TensorHandleArray inputs2 = OH_AI_ModelGetInputs(model2);
4068     ASSERT_NE(inputs2.handle_list, nullptr);
4069     FillInputsData(inputs2, "ml_face_isface", true);
4070     printf("==========Model Predict2==========\n");
4071     OH_AI_TensorHandleArray outputs2;
4072     OH_AI_Status predict_ret2 = OH_AI_ModelPredict(model2, inputs2, &outputs2, nullptr, nullptr);
4073     ASSERT_EQ(predict_ret2, OH_AI_STATUS_SUCCESS);
4074     CompareResult(outputs2, "ml_face_isface", 0.01, 0.01, true);
4075     OH_AI_ModelDestroy(&model2);
4076 }
4077 
4078 // 异常场景:CachePath路径非法值或不存在
4079 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0004, Function | MediumTest | Level1) {
4080     if (!IsNPU()) {
4081         printf("NNRt is not NPU, skip this test");
4082         return;
4083     }
4084     printf("==========OH_AI_ContextCreate==========\n");
4085     OH_AI_ContextHandle context = OH_AI_ContextCreate();
4086     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4087     size_t num = 0;
4088     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
4089     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
4090     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
4091     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
4092     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
4093     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
4094     const char *cache_path = "/data/local/tmp/notexist/";
4095     const char *cache_version = "1";
4096     const char *model_name = "cache_error";
4097     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
4098     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
4099     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
4100     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
4101     printf("==========OH_AI_ModelCreate==========\n");
4102     OH_AI_ModelHandle model = OH_AI_ModelCreate();
4103     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
4104     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
4105 }
4106 
4107 // 异常场景:CacheVersion在取值范围外
4108 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0005, Function | MediumTest | Level1) {
4109     if (!IsNPU()) {
4110         printf("NNRt is not NPU, skip this test");
4111         return;
4112     }
4113     printf("==========OH_AI_ContextCreate==========\n");
4114     OH_AI_ContextHandle context = OH_AI_ContextCreate();
4115     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4116     size_t num = 0;
4117     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
4118     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
4119     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
4120     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
4121     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
4122     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
4123     const char *cache_path = "/data/local/tmp";
4124     const char *cache_version = "-1";
4125     const char *model_name = "cache_error";
4126     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
4127     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
4128     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
4129     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
4130     printf("==========OH_AI_ModelCreate==========\n");
4131     OH_AI_ModelHandle model = OH_AI_ModelCreate();
4132     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
4133     ASSERT_EQ(ret, OH_AI_STATUS_LITE_ERROR);
4134 }
4135 
4136 // 异常场景:a模型生成缓存,b模型用相同的CachePath、CacheVersion、modelname
4137 HWTEST(MSLiteTest, SUB_AI_MindSpore_NNRT_Cache_0006, Function | MediumTest | Level1) {
4138     if (!IsNPU()) {
4139         printf("NNRt is not NPU, skip this test");
4140         return;
4141     }
4142     printf("==========OH_AI_ContextCreate==========\n");
4143     OH_AI_ContextHandle context = OH_AI_ContextCreate();
4144     auto nnrt_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4145     size_t num = 0;
4146     auto descs = OH_AI_GetAllNNRTDeviceDescs(&num);
4147     auto desc_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs, 0);
4148     auto name = OH_AI_GetNameFromNNRTDeviceDesc(desc_0);
4149     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name << std::endl;
4150     auto id_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc_0);
4151     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info, id_0);
4152     const char *cache_path = "/data/local/tmp";
4153     const char *cache_version = "1";
4154     const char *model_name = "cache_same";
4155     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CachePath", cache_path, strlen(cache_path));
4156     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "CacheVersion", cache_version, strlen(cache_version));
4157     OH_AI_DeviceInfoAddExtension(nnrt_device_info, "ModelName", model_name, strlen(model_name));
4158     OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
4159     printf("==========OH_AI_ModelCreate==========\n");
4160     OH_AI_ModelHandle model = OH_AI_ModelCreate();
4161     OH_AI_Status ret = OH_AI_ModelBuildFromFile(model, "/data/test/ml_face_isface.ms", OH_AI_MODELTYPE_MINDIR, context);
4162     ASSERT_EQ(ret, OH_AI_STATUS_SUCCESS);
4163     printf("==========OH_AI_ContextCreate2==========\n");
4164     OH_AI_ContextHandle context2 = OH_AI_ContextCreate();
4165     auto nnrt_device_info2 = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_NNRT);
4166     size_t num2 = 0;
4167     auto descs2 = OH_AI_GetAllNNRTDeviceDescs(&num2);
4168     auto desc2_0 = OH_AI_GetElementOfNNRTDeviceDescs(descs2, 0);
4169     auto name2 = OH_AI_GetNameFromNNRTDeviceDesc(desc2_0);
4170     std::cout << "OH_AI_GetNameFromNNRTDeviceDesc: " << name2 << std::endl;
4171     auto id2_0 = OH_AI_GetDeviceIdFromNNRTDeviceDesc(desc2_0);
4172     OH_AI_DeviceInfoSetDeviceId(nnrt_device_info2, id2_0);
4173     const char *cache_path2 = "/data/local/tmp";
4174     const char *cache_version2 = "1";
4175     const char *model_name2 = "cache_same";
4176     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "CachePath", cache_path2, strlen(cache_path2));
4177     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "CacheVersion", cache_version2, strlen(cache_version2));
4178     OH_AI_DeviceInfoAddExtension(nnrt_device_info2, "ModelName", model_name2, strlen(model_name2));
4179     OH_AI_ContextAddDeviceInfo(context2, nnrt_device_info2);
4180     printf("==========OH_AI_ModelCreate2==========\n");
4181     OH_AI_ModelHandle model2 = OH_AI_ModelCreate();
4182     OH_AI_Status ret2 = OH_AI_ModelBuildFromFile(model2, "/data/test/ml_ocr_cn.ms", OH_AI_MODELTYPE_MINDIR, context2);
4183     ASSERT_EQ(ret2, OH_AI_STATUS_SUCCESS);
4184     printf("==========GetInputs==========\n");
4185     OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
4186     ASSERT_NE(inputs.handle_list, nullptr);
4187     FillInputsData(inputs, "ml_face_isface", true);
4188     printf("==========Model Predict==========\n");
4189     OH_AI_TensorHandleArray outputs;
4190     OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr);
4191     ASSERT_EQ(predict_ret, OH_AI_STATUS_SUCCESS);
4192     CompareResult(outputs, "ml_face_isface", 0.01, 0.01, true);
4193     OH_AI_ModelDestroy(&model);
4194     printf("==========GetInputs2==========\n");
4195     OH_AI_TensorHandleArray inputs2 = OH_AI_ModelGetInputs(model2);
4196     ASSERT_NE(inputs2.handle_list, nullptr);
4197     FillInputsData(inputs2, "ml_ocr_cn", true);
4198     printf("==========Model Predict2==========\n");
4199     OH_AI_TensorHandleArray outputs2;
4200     OH_AI_Status predict_ret2 = OH_AI_ModelPredict(model2, inputs2, &outputs2, nullptr, nullptr);
4201     ASSERT_EQ(predict_ret2, OH_AI_STATUS_LITE_ERROR);
4202     OH_AI_ModelDestroy(&model2);
4203 }
4204