/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <unistd.h>

#include <atomic>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
#include <thread>

#include "securec.h"

#include "test/system_test/common/nnrt_test.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace SystemTest {
constexpr int TMP_LENGTH = 32;
constexpr int PATH_LENGTH = 255;
constexpr int STRESS_COUNT = 10000000;
const float EPSILON = 1e-4;
const uint32_t NO_DEVICE_COUNT = 0;
const uint32_t ADDEND_DATA_LENGTH = 12 * sizeof(float);
// Prefix of the line in /proc/<pid>/status that reports the process's virtual memory size.
const std::string VMRSS = "VmSize:";
// Cleared by the stress loop to tell the background sampling thread to exit.
std::atomic_bool g_monitorRunning {true};

class StressTest : public NNRtTest {
public:
    StressTest() = default;
};

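// Reads /proc/<pid>/status and returns the "VmSize:" line with a timestamp
// appended. Relies on the Linux procfs layout; returns an empty string when
// the status file cannot be opened.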
std::string GetVMRSS(pid_t pid)
{
    std::string fileName{"/proc/"};
    fileName += std::to_string(pid) + "/status";
    std::ifstream ifs(fileName, std::ios::binary);
    if (!ifs.is_open()) {
        std::cout << "Failed to open " << fileName << std::endl;
        return "";
    }

    std::string vmRss;
    // Extract the virtual memory size from the process status.
    while (getline(ifs, vmRss)) {
        // Match lines that start with the "VmSize:" prefix.
        if (vmRss.compare(0, VMRSS.size(), VMRSS) == 0) {
            break;
        }
    }
    ifs.close();

    // Append a timestamp so successive samples can be ordered in the log.
    time_t t = time(nullptr);
    char tmp[TMP_LENGTH] {' '};
    // Write after the leading space; leave room for the null terminator.
    strftime(&(tmp[1]), TMP_LENGTH - 1, "%Y-%m-%d %H:%M:%S", localtime(&t));

    return vmRss + tmp;
}

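// Periodically samples the virtual memory size of the given process and
// appends each sample to RealtimeVMRSS_<pid>.txt in the current working
// directory. Runs until g_monitorRunning is cleared or a sample fails.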
void PrintVMRSS(pid_t pid)
{
    char path[PATH_LENGTH];
    if (!getcwd(path, PATH_LENGTH)) {
        std::cout << "Failed to get current path" << std::endl;
        return;
    }
    std::string pathStr = path;
    std::string pathFull = pathStr + "/RealtimeVMRSS_" + std::to_string(pid) + ".txt";

    std::ofstream out(pathFull, std::ios::app);
    if (!out.is_open()) {
        std::cout << "Failed to open " << pathFull << std::endl;
        return;
    }

    while (g_monitorRunning) {
        std::string rss = GetVMRSS(pid);
        if (rss.empty()) {
            std::cout << "Failed to read memory use of process " << pid << std::endl;
            out.close();
            return;
        }

        out << rss << std::endl;
        sleep(1);
    }
    out.close();
}

/*
 * @tc.name: stress_test_001
 * @tc.desc: Check for memory leaks by repeatedly performing end-to-end execution.
 * @tc.type: FUNC
 */
HWTEST_F(StressTest, stress_test_001, testing::ext::TestSize.Level1)
{
    std::cout << "Start RunDoubleConvStressTest test case." << std::endl;

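    // Launch a background thread that logs this process's VmSize once per
    // second, so memory growth across iterations can be inspected offline.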
    pid_t pidOfStressTest = getpid();
    std::thread thread(PrintVMRSS, pidOfStressTest);

    size_t targetDevice{0};

    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, output};

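    // Each addend holds 3 * 2 * 2 = 12 float elements; the expected output is
    // the element-wise sum 1.23 + 2.34 = 3.57.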
    std::vector<float> firstAddendValue(12, 1.23);
    std::vector<float> secondAddendValue(12, 2.34);
    float outputBuffer[12];
    std::vector<float> expectedOutput(12, 3.57);

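    // Each iteration builds, compiles, executes, and destroys a fresh model.
    // If no resources leak, the logged VmSize should stay flat over time.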
    for (int i = 0; i < STRESS_COUNT; i++) {
        tensors = {addend1, addend2, activation, output};

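        // Stage 1: construct the single-Add model graph and finalize it.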
        m_model = OH_NNModel_Construct();
        ASSERT_NE(nullptr, m_model);
        ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors));
        ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3}));
        ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({0, 1}, {3}));
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model));

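        // Stage 2: compile for the target device; the model handle can be
        // destroyed once the compilation has been constructed from it.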
        m_compilation = OH_NNCompilation_Construct(m_model);
        ASSERT_NE(nullptr, m_compilation);
        OH_NNModel_Destroy(&m_model);
        ASSERT_EQ(nullptr, m_model);

        ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
        ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
        targetDevice = m_devices[0]; // Use the first device in system test.
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

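        // Stage 3: create an executor from the compilation, then release the
        // compilation itself.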
        m_executor = OH_NNExecutor_Construct(m_compilation);
        ASSERT_NE(nullptr, m_executor);
        OH_NNCompilation_Destroy(&m_compilation);
        ASSERT_EQ(nullptr, m_compilation);

        // Set value of firstAddend
        ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));

        // Set value of secondAddend
        ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));

        // Set output buffer of output
        ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));

        // Run inference and assert output value
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
        for (int j = 0; j < 12; j++) {
            ASSERT_LE(std::abs(outputBuffer[j] - expectedOutput[j]), EPSILON);
        }

        OH_NNExecutor_Destroy(&m_executor);
        ASSERT_EQ(nullptr, m_executor);

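        // Reset the cached test state so the next iteration starts clean.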
        m_tensors.clear();
        m_quantParams.clear();
        m_nodes.clear();
        m_inputs.clear();
        m_outputs.clear();
        m_devices.clear();

        if (i % 1000 == 0) {
            std::cout << "Executed " << i << " times." << std::endl;
        }
    }
    // Stop the memory monitor and wait for it to flush its last sample.
    g_monitorRunning = false;
    thread.join();
}
} // namespace SystemTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS