/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vector>
#include <v1_0/nnrt_types.h>
#include <v1_0/innrt_device.h>
#include <v1_0/iprepared_model.h>

#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"

#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/memory_manager.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"
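
// End-to-end HDI tests for NNRt (V1_0) model inference: each case prepares a
// model on the device (optionally from an exported cache), runs it with
// shared-memory IO tensors, and verifies the outputs or the error handling.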

using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;

namespace {

class ModelRunTest : public HDINNRtTest {};

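// Shared helper for the fixed- and dynamic-shape tests below: builds an Add
// graph, converts it to a V1_0::Model, prepares it with the given config, runs
// it with two constant addend tensors, and checks the output against the
// expected element-wise sum.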
void AddModelTest(OHOS::sptr<V1_0::INnrtDevice> &device_, V1_0::ModelConfig &modelConfig, bool isDynamic)
{
    OH_NNModel *model = nullptr;
    if (isDynamic) {
        HDICommon::BuildAddGraphDynamic(&model);
    } else {
        HDICommon::BuildAddGraph(&model);
    }
    ASSERT_NE(model, nullptr);
    // convert model from OH_NNModel to V1_0::Model
    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // prepare model
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
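    // Guard: Run() below dereferences the prepared model, so stop here if
    // PrepareModel failed to produce one.
    ASSERT_NE(iPreparedModel, nullptr);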

    std::vector<V1_0::IOTensor> inputs;
    std::vector<V1_0::IOTensor> outputs;
    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isOutputBufferEnough;

    std::vector<void *> mappedMemories;
    // set inputs
    std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
    for (uint32_t i = 0; i < inputValue.size(); i++) {
        std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);

        auto tensor = HDICommon::CreateIOTensor(device_);
        auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
        mappedMemories.emplace_back(memAddress);
        // set input data
        HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
        inputs.emplace_back(tensor);
    }
    // set outputs
    auto outputTensor = HDICommon::CreateIOTensor(device_);
    outputs.emplace_back(outputTensor);
    // model run
    EXPECT_EQ(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
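    // outputsDims and isOutputBufferEnough are output parameters populated by
    // Run(); this helper does not inspect them further.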

    // map memory to get output buffer
    auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
    mappedMemories.emplace_back(memAddress);

    auto buffer = (float *)memAddress;
    std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
    std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
    PrintTensor(buffer, ADDEND_DATA_SIZE);
    // check output
    EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";

    // release
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
    HDICommon::ReleaseBufferOfTensors(device_, inputs);
    HDICommon::ReleaseBufferOfTensors(device_, outputs);
    HDICommon::UnmapAllMemory(mappedMemories);
}

} // namespace

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0200
 * @tc.name   : End-to-end inference with a fixed-shape model
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0200, Function | MediumTest | Level1)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0300
 * @tc.name   : End-to-end inference with a fixed-shape model, fp16
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0300, Function | MediumTest | Level2)
{
    bool isFloat16Supported = false;
    EXPECT_EQ(HDF_SUCCESS, device_->IsFloat16PrecisionSupported(isFloat16Supported));
    if (!isFloat16Supported) {
        GTEST_SKIP() << "Float16 precision is not supported.";
    }

    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = true, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0400
 * @tc.name   : Inference with a dynamic-shape model
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0400, Function | MediumTest | Level2)
{
    bool isDynamicInputSupported = false;
    EXPECT_EQ(HDF_SUCCESS, device_->IsDynamicInputSupported(isDynamicInputSupported));
    if (!isDynamicInputSupported) {
        GTEST_SKIP() << "Dynamic input is not supported.";
    }

    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = true, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    AddModelTest(device_, modelConfig, true);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0500
 * @tc.name   : Model inference with empty inputs
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0500, Function | MediumTest | Level3)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // model config
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    // prepare model
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
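    // Guard: Run() below dereferences the prepared model.
    ASSERT_NE(iPreparedModel, nullptr);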

    std::vector<V1_0::IOTensor> inputs;
    std::vector<V1_0::IOTensor> outputs;
    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isOutputBufferEnough;

    // only set outputs, leave inputs empty
    auto outputTensor = HDICommon::CreateIOTensor(device_);
    outputs.emplace_back(outputTensor);
    // model run should fail: HDF error codes are negative, i.e. less than HDF_SUCCESS
    EXPECT_GT(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));

    // release
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
    HDICommon::ReleaseBufferOfTensors(device_, outputs);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0600
 * @tc.name   : Model inference with empty outputs
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0600, Function | MediumTest | Level3)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // model config
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    // prepare model
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
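    // Guard: Run() below dereferences the prepared model.
    ASSERT_NE(iPreparedModel, nullptr);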

    std::vector<V1_0::IOTensor> inputs;
    std::vector<V1_0::IOTensor> outputs;
    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isOutputBufferEnough;
    std::vector<void *> mappedMemories;

    // only set inputs, leave outputs empty
    std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
    for (uint32_t i = 0; i < inputValue.size(); i++) {
        std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);

        auto tensor = HDICommon::CreateIOTensor(device_);
        auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
        mappedMemories.emplace_back(memAddress);
        // set input data
        HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
        inputs.emplace_back(tensor);
    }
    // model run should fail: HDF error codes are negative, i.e. less than HDF_SUCCESS
    EXPECT_GT(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));

    // release
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
    HDICommon::ReleaseBufferOfTensors(device_, inputs);
    HDICommon::UnmapAllMemory(mappedMemories);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0100
 * @tc.name   : Model inference with performance mode PERFORMANCE_NONE
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0100, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_NONE, .priority = V1_0::PRIORITY_MEDIUM};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0200
 * @tc.name   : Model inference with performance mode PERFORMANCE_LOW
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0200, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_LOW, .priority = V1_0::PRIORITY_MEDIUM};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0300
 * @tc.name   : Model inference with performance mode PERFORMANCE_MEDIUM
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0300, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_MEDIUM, .priority = V1_0::PRIORITY_MEDIUM};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0400
 * @tc.name   : Model inference with performance mode PERFORMANCE_HIGH
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0400, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_HIGH, .priority = V1_0::PRIORITY_HIGH};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0500
 * @tc.name   : Model inference with performance mode PERFORMANCE_EXTREME
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0500, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_LOW};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0600
 * @tc.name   : Model inference with priority PRIORITY_NONE
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0600, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_NONE};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0700
 * @tc.name   : Model inference with priority PRIORITY_LOW
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0700, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_HIGH, .priority = V1_0::PRIORITY_LOW};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0800
 * @tc.name   : Model inference with priority PRIORITY_MEDIUM
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0800, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_MEDIUM};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0900
 * @tc.name   : Model inference with priority PRIORITY_HIGH
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0900, Function | MediumTest | Level2)
{
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
    AddModelTest(device_, modelConfig, false);
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_1000
 * @tc.name   : Model inference with a model loaded from cache
 * @tc.desc   : [C- SOFTWARE -0200]
 */
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_1000, Function | MediumTest | Level1)
{
    bool isModelCacheSupported = false;
    EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isModelCacheSupported));
    if (!isModelCacheSupported) {
        GTEST_SKIP() << "Model cache is not supported.";
    }

    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // model config
    V1_0::ModelConfig modelConfig = {
        .enableFloat16 = false, .mode = V1_0::PERFORMANCE_HIGH, .priority = V1_0::PRIORITY_HIGH};
    // prepare model
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
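    // Guard: ExportModelCache() below dereferences the prepared model.
    ASSERT_NE(iPreparedModel, nullptr);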
    // export model cache
    std::vector<V1_0::SharedBuffer> modelCache;
    EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
    // prepare a second model from the exported cache
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel1;
    EXPECT_EQ(HDF_SUCCESS, device_->PrepareModelFromModelCache(modelCache, modelConfig, iPreparedModel1));
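    // Guard: Run() below dereferences the cache-built prepared model.
    ASSERT_NE(iPreparedModel1, nullptr);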

    std::vector<V1_0::IOTensor> inputs;
    std::vector<V1_0::IOTensor> outputs;
    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isOutputBufferEnough;
    std::vector<void *> mappedMemories;

    // set inputs
    std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
    for (uint32_t i = 0; i < inputValue.size(); i++) {
        std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);
        auto tensor = HDICommon::CreateIOTensor(device_);
        auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
        mappedMemories.emplace_back(memAddress);
        // set input data
        HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
        inputs.emplace_back(tensor);
    }
    // set outputs
    auto outputTensor = HDICommon::CreateIOTensor(device_);
    outputs.emplace_back(outputTensor);
    // run inference with the prepared model built from cache
    EXPECT_EQ(HDF_SUCCESS, iPreparedModel1->Run(inputs, outputs, outputsDims, isOutputBufferEnough));

    // map memory to get output buffer
    auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
    mappedMemories.emplace_back(memAddress);

    auto buffer = (float *)memAddress;
    std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
    std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
    // check output
    EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";

    // release
    if (tensorBuffer.fd != -1) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
    HDICommon::ReleaseBufferOfTensors(device_, inputs);
    HDICommon::ReleaseBufferOfTensors(device_, outputs);
    HDICommon::UnmapAllMemory(mappedMemories);
}