/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frameworks/native/ops/avgpool_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class AvgPoolBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    void SetPadMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetParams();

public:
    AvgPoolBuilder m_builder;
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3, 4, 5};
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_param_dim{};
};
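
// Default tensor layout used by the tests below: tensor 0 is the FLOAT32 pooling
// input, tensor 1 is the output, and tensors 2-5 hold the kernel-size, stride,
// pad-mode and activation parameters registered by SetParams().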

void AvgPoolBuilderTest::SetUp() {}

void AvgPoolBuilderTest::TearDown() {}

void AvgPoolBuilderTest::SetPadMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* padModeValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, padModeValue);
    tensor->SetBuffer(padModeValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
}

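// SetParams registers the complete, valid AvgPool parameter set in pad-mode form
// (kernel size, stride, pad mode, activation type); the negative tests below
// substitute one of these tensors with an invalid one.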
void AvgPoolBuilderTest::SetParams()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
}

/**
 * @tc.name: avgpool_build_pad_mode_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_002
 * @tc.desc: Verify that a second call to the build function is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_003
 * @tc.desc: Verify that the build function fails when the input tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_004
 * @tc.desc: Verify that the build function fails when the output tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_005
 * @tc.desc: Verify that the build function fails when inputIndex is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1)
{
    m_inputs = {6};
    m_outputs = {1};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_006
 * @tc.desc: Verify that the build function fails when outputIndex is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {6};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_007
 * @tc.desc: Verify that the build function rejects a kernelSize tensor with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    // The kernelSize tensor is created as OH_NN_INT32 instead of the OH_NN_INT64 used by the valid SetParams() path.
    int32_t kernelsNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
        OH_NN_AVG_POOL_KERNEL_SIZE);
    int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1};
    EXPECT_NE(nullptr, kernelSizeValue);

    tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum);
    m_allTensors.emplace_back(tensor);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_008
 * @tc.desc: Verify that the build function rejects a stride tensor with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);

    // The stride tensor is created as OH_NN_INT32 instead of the OH_NN_INT64 used by the valid SetParams() path.
    int32_t strideNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
    m_allTensors.emplace_back(tensor);

    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_009
 * @tc.desc: Verify that the build function rejects a padMode tensor with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);

    // The padMode tensor is created as OH_NN_INT32 instead of the OH_NN_INT8 used by SetPadMode().
    int32_t *padValueTest = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, padValueTest);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    tensor->SetBuffer(padValueTest, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_010
 * @tc.desc: Verify that the build function rejects an activationType tensor with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    // The activationType tensor is created as OH_NN_INT32 instead of the OH_NN_INT8 used by SetActivation().
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_011
 * @tc.desc: Verify that the build function rejects an activation parameter that is not a scalar
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1)
{
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    // The activation parameter is given two elements, so it is no longer a scalar.
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_012
 * @tc.desc: Verify that the build function rejects a parameter type that does not belong to AvgPool
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    int8_t* activationValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValue);

    // OH_NN_DIV_ACTIVATIONTYPE is not a valid AvgPool parameter type.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_DIV_ACTIVATIONTYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_013
 * @tc.desc: Verify that the build function rejects an out-of-range padMode value
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    // A padMode value of 6 is outside the supported range.
    int8_t *padValueTest = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, padValueTest);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    tensor->SetBuffer(padValueTest, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_014
 * @tc.desc: Verify that the build function rejects an out-of-range activation value
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);

    // An activation value of 6 is outside the supported range.
    int8_t* activationValue = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, activationValue);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);

    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_getprimitive_pad_mode_001
 * @tc.desc: Verify that GetPrimitive returns the expected attributes after a successful build
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

    std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get());
    std::vector<int64_t> kernelSizeValueTest{1, 1};
    EXPECT_EQ(kernelSizeValueTest, returnKernelSize);

    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, returnStrides);

    int returnPadMode = mindspore::lite::MindIR_AvgPoolFusion_GetPadMode(primitive.get());
    EXPECT_EQ(1, returnPadMode);
    int returnActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get());
    EXPECT_EQ(0, returnActivation);
}

/**
 * @tc.name: avgpool_getprimitive_pad_mode_002
 * @tc.desc: Verify that GetPrimitive returns an empty primitive when the build function has not been called
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParams();
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS