• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "frameworks/native/ops/maxpool_builder.h"
17 
18 #include "ops_test.h"
19 
20 using namespace testing;
21 using namespace testing::ext;
22 using namespace OHOS::NeuralNetworkRuntime::Ops;
23 
24 namespace OHOS {
25 namespace NeuralNetworkRuntime {
26 namespace UnitTest {
// Fixture for MaxPoolBuilder::Build / GetPrimitive tests: a 1x3x3x1 input,
// 1x2x2x1 output max-pool node with four parameter tensors
// (kernel size, stride, pad mode, activation) at graph indices 2..5.
class MaxPoolBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    // Appends a pad-mode parameter tensor (int8 scalar initialized to 0,
    // buffer ownership handed to the tensor) to m_allTensors.
    void SetPadMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    // Registers the full, valid set of max-pool parameter tensors.
    void SetParam();

public:
    MaxPoolBuilder m_builder;
    std::vector<uint32_t> m_inputs{0};            // graph index of the input tensor
    std::vector<uint32_t> m_outputs{1};           // graph index of the output tensor
    std::vector<uint32_t> m_params{2, 3, 4, 5};   // graph indices of the parameter tensors
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};      // NOTE: "kenelsize" typo kept — renaming would touch every test
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_param_dim{};           // scalar parameters use an empty shape
};
47 
// No per-test setup: each test builds its own tensor lists.
void MaxPoolBuilderTest::SetUp() {}
49 
// No per-test cleanup required.
void MaxPoolBuilderTest::TearDown() {}
51 
SetPadMode(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)52 void MaxPoolBuilderTest::SetPadMode(OH_NN_DataType dataType,
53     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
54 {
55     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
56     int8_t* padModeValue = new (std::nothrow) int8_t(0);
57     EXPECT_NE(nullptr, padModeValue);
58     tensor->SetBuffer(padModeValue, sizeof(int8_t));
59     m_allTensors.emplace_back(tensor);
60 }
61 
// Registers the standard (valid) parameter tensors: INT64 kernel size and
// stride, INT8 pad mode and activation type.
void MaxPoolBuilderTest::SetParam()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
}
69 
/**
 * @tc.name: maxpool_build_pad_mode_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_001, TestSize.Level1)
{
    // All inputs, outputs and parameters are valid, so Build must succeed.
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
83 
/**
 * @tc.name: maxpool_build_pad_mode_002
 * @tc.desc: Verify the forbidden of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    // A second Build on an already-built op must be rejected.
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
98 
/**
 * @tc.name: maxpool_build_pad_mode_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_003, TestSize.Level1)
{
    // Empty input index list (indices shifted down accordingly) — Build must fail.
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
116 
/**
 * @tc.name: maxpool_build_pad_mode_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004, TestSize.Level1)
{
    // Empty output index list — Build must fail.
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
134 
/**
 * @tc.name: maxpool_build_pad_mode_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1)
{
    // Input index 6 is outside the 6 registered tensors (0..5) — Build must fail.
    m_inputs = {6};
    m_outputs = {1};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
152 
/**
 * @tc.name: maxpool_build_pad_mode_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_006, TestSize.Level1)
{
    // Output index 6 is outside the 6 registered tensors (0..5) — Build must fail.
    m_inputs = {0};
    m_outputs = {6};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
170 
171 /**
172  * @tc.name: maxpool_build_pad_mode_007
173  * @tc.desc: Verify the invalid kernelSize of the build function
174  * @tc.type: FUNC
175  */
176 HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_007, TestSize.Level1)
177 {
178     m_paramsIndex = m_params;
179     SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
180     SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
181 
182     int32_t kernelsNum{2};
183     std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
184         OH_NN_MAX_POOL_KERNEL_SIZE);
185     int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1};
186     EXPECT_NE(nullptr, kernelSizeValue);
187     tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum);
188     m_allTensors.emplace_back(tensor);
189 
190     SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
191     SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
192     SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
193     EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
194 }
195 
196 /**
197  * @tc.name: maxpool_build_pad_mode_008
198  * @tc.desc: Verify the invalid stride of the build function
199  * @tc.type: FUNC
200  */
201 HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_008, TestSize.Level1)
202 {
203     m_paramsIndex = m_params;
204     SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
205     SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
206 
207     SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
208     int32_t strideNum{2};
209     std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
210     int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
211     EXPECT_NE(nullptr, strideValue);
212 
213     tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
214     m_allTensors.emplace_back(tensor);
215     SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
216     SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
217     EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
218 }
219 
/**
 * @tc.name: maxpool_build_pad_mode_009
 * @tc.desc: Verify the invalid padmode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    // Pad parameter built with an INT32 buffer so Build must reject it.
    // NOTE(review): the tensor type is OH_NN_MAX_POOL_PAD, not OH_NN_MAX_POOL_PAD_MODE,
    // although the test name says "padmode" — presumably intentional for the
    // invalid-parameter path; confirm against the builder implementation.
    int32_t *padValueTest = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, padValueTest);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD);
    tensor->SetBuffer(padValueTest, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
242 
243 
244 /**
245  * @tc.name: maxpool_build_pad_mode_010
246  * @tc.desc: Verify the invalid activation of the build function
247  * @tc.type: FUNC
248  */
249 HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_010, TestSize.Level1)
250 {
251     m_paramsIndex = m_params;
252     SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
253     SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
254     SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
255     SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
256     SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
257 
258     std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
259         OH_NN_MAX_POOL_ACTIVATION_TYPE);
260     int32_t* activationValue = new (std::nothrow) int32_t(0);
261     EXPECT_NE(nullptr, activationValue);
262 
263     tensor->SetBuffer(activationValue, sizeof(int32_t));
264     m_allTensors.emplace_back(tensor);
265     EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
266 }
267 
/**
 * @tc.name: maxpool_build_pad_mode_011
 * @tc.desc: Verify the scalar length of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1)
{
    // Force the scalar-parameter shape to {2}; the activation tensor then holds
    // two elements where the builder expects a scalar, so Build must fail.
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
292 
293 /**
294  * @tc.name: maxpool_getprimitive_pad_mode_001
295  * @tc.desc: Verify the behavior of the GetPrimitive function
296  * @tc.type: FUNC
297  */
298 HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_001, TestSize.Level1)
299 {
300     m_paramsIndex = m_params;
301     SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
302     SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
303 
304     SetParam();
305     EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
306     LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
307     LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
308     EXPECT_NE(expectPrimitive, primitive);
309 
310     std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
311     std::vector<int64_t> kernelSizeValueTest{1, 1};
312     EXPECT_EQ(kernelSizeValueTest, returnKernelSize);
313 
314     std::vector<int64_t> returnStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
315     std::vector<int64_t> strideValueTest{1, 1};
316     int returnPadMode = mindspore::lite::MindIR_MaxPoolFusion_GetPadMode(primitive.get());
317     EXPECT_EQ(1, returnPadMode);
318 
319     int returnActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
320     EXPECT_EQ(0, returnActivation);
321 }
322 
/**
 * @tc.name: maxpool_getprimitive_pad_mode_002
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParam();
    // Build is intentionally NOT called, so GetPrimitive must return the
    // null primitive.
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
339 } // namespace UnitTest
340 } // namespace NeuralNetworkRuntime
341 } // namespace OHOS