/*
 * Copyright (c) 2022-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sstream>
#include <vector>
#include <thread>
#include <v1_0/nnrt_types.h>
#include <v1_0/innrt_device.h>
#include <v1_0/iprepared_model.h>

#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"

#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/memory_manager.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"

using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;

namespace {

// number of threads to create
const int THREAD_NUM = 3;

// number of stress test iterations
const int STRESS_COUNT = 100000;

// print progress every PRINT_FREQ iterations
const int PRINT_FREQ = 500;

class StabilityTest : public HDINNRtTest {};

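// Compile the given model once on the device. Used as the per-thread
// workload in the concurrent compilation stress test below.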
void PrepareModelTest(OHOS::sptr<V1_0::INnrtDevice> device, V1_0::Model *iModel)
{
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    V1_0::ModelConfig config;
    EXPECT_EQ(HDF_SUCCESS, device->PrepareModel(*iModel, config, iPreparedModel));
}

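// Run a single inference of the Add graph on a prepared model and verify
// that the output buffer holds the expected element-wise sum.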
void RunModelTest(OHOS::sptr<V1_0::INnrtDevice> device, OHOS::sptr<V1_0::IPreparedModel> iPreparedModel)
{
    std::vector<V1_0::IOTensor> inputs;
    std::vector<V1_0::IOTensor> outputs;
    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isOutputBufferEnough;
    std::vector<void *> mappedMemories; // addresses mapped from tensor fds

    // set inputs
    std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
    for (uint32_t i = 0; i < inputValue.size(); i++) {
        std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);

        auto tensor = HDICommon::CreateIOTensor(device);
        auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
        mappedMemories.emplace_back(memAddress);
        // set input data
        HDICommon::SetData(static_cast<float *>(memAddress), ADDEND_BUFFER_LENGTH, data.data());
        inputs.emplace_back(tensor);
    }
    // set outputs
    auto outputTensor = HDICommon::CreateIOTensor(device);
    outputs.emplace_back(outputTensor);
    // model run
    EXPECT_EQ(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));

    // map memory to get output buffer
    auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
    mappedMemories.emplace_back(memAddress);

    auto buffer = static_cast<float *>(memAddress);
    std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
    std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
    // check output
    EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";
}

} // namespace

/**
 * @tc.number : SUB_AI_NNRt_Reliability_South_Stress_0100
 * @tc.name : Multi-threaded concurrent model compilation succeeds
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(StabilityTest, SUB_AI_NNRt_Reliability_South_Stress_0100, Reliability | MediumTest | Level2)
{
    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get();
    std::vector<V1_0::Model *> iModels;
    std::vector<OHOS::sptr<V1_0::IPreparedModel>> iPreparedModels;
    std::vector<V1_0::SharedBuffer> tensorBuffers;
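    // Build and convert one model per worker thread so that each thread
    // compiles its own copy concurrently.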
    for (int i = 0; i < THREAD_NUM; i++) {
        // build graph with NNModel
        OH_NNModel *model = nullptr;
        HDICommon::BuildAddGraph(&model);
        // convert NNModel to V1_0::Model
        V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
        V1_0::Model *iModel = nullptr;
        auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
        EXPECT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
        if (retConvert != OH_NN_SUCCESS) {
            break;
        }
        iModels.emplace_back(iModel);
        tensorBuffers.emplace_back(tensorBuffer);
    }
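    // Stress loop: compile all models concurrently, one thread per model,
    // for STRESS_COUNT iterations.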
    for (int i = 0; i < STRESS_COUNT; i++) {
        // create threads to prepare model
        std::vector<std::thread> threads;
        for (auto &iModel : iModels) {
            threads.emplace_back(PrepareModelTest, device, iModel);
        }
        // wait for threads to finish
        for (auto &th : threads) {
            th.join();
        }
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0100 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
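    // Release the converted models and their shared tensor buffers.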
    for (size_t i = 0; i < iModels.size(); i++) {
        mindspore::lite::MindIR_Model_Destroy(&iModels[i]);
        if (tensorBuffers[i].fd != NNRT_INVALID_FD) {
            EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffers[i]));
        }
    }
}

/**
 * @tc.number : SUB_AI_NNRt_Reliability_South_Stress_0200
 * @tc.name : Long-term stability test of multi-threaded concurrent model inference
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(StabilityTest, SUB_AI_NNRt_Reliability_South_Stress_0200, Reliability | MediumTest | Level2)
{
    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get();

    std::vector<V1_0::Model *> iModels;
    std::vector<OHOS::sptr<V1_0::IPreparedModel>> iPreparedModels;
    std::vector<V1_0::SharedBuffer> tensorBuffers;
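    // Build, convert, and compile one model per worker thread up front;
    // the prepared models are then shared with the inference stress loop below.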
    for (int i = 0; i < THREAD_NUM; i++) {
        // build graph with NNModel
        OH_NNModel *model = nullptr;
        HDICommon::BuildAddGraph(&model);
        // convert NNModel to V1_0::Model
        V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
        V1_0::Model *iModel = nullptr;
        auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
        EXPECT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
        if (retConvert != OH_NN_SUCCESS) {
            break;
        }
        iModels.emplace_back(iModel);
        tensorBuffers.emplace_back(tensorBuffer);
        // prepare model
        OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
        V1_0::ModelConfig config = {
            .enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
        auto retPrepare = device->PrepareModel(*iModel, config, iPreparedModel);
        EXPECT_EQ(HDF_SUCCESS, retPrepare) << "PrepareModel failed";
        if (retPrepare != HDF_SUCCESS) {
            break;
        }
        iPreparedModels.emplace_back(iPreparedModel);
    }
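    // Stress loop: run inference on all prepared models concurrently,
    // one thread per model, for STRESS_COUNT iterations.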
    for (int i = 0; i < STRESS_COUNT; i++) {
        // create threads to run model
        std::vector<std::thread> threads;
        for (auto &iPreparedModel : iPreparedModels) {
            threads.emplace_back(RunModelTest, device, iPreparedModel);
        }
        // wait for threads to finish
        for (auto &th : threads) {
            th.join();
        }
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0200 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
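    // Clean up: destroy the converted models and release their shared buffers.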
    for (size_t i = 0; i < iModels.size(); i++) {
        mindspore::lite::MindIR_Model_Destroy(&iModels[i]);
        if (tensorBuffers[i].fd != NNRT_INVALID_FD) {
            EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffers[i]));
        }
    }
}