/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16 #include "frameworks/native/ops/maxpool_builder.h"
17
18 #include "ops_test.h"
19
20 using namespace testing;
21 using namespace testing::ext;
22 using namespace OHOS::NeuralNetworkRuntime::Ops;
23
24 namespace OHOS {
25 namespace NeuralNetworkRuntime {
26 namespace UnitTest {
// Fixture for MaxPoolBuilder tests that exercise the explicit-pad parameter
// path (kernel size, stride, pad, activation supplied as parameter tensors).
class MaxPoolPadBuilderTest : public OpsTest {
public:
    void SetUp();
    void TearDown();

    // Appends a pad parameter tensor (four int64 zeros) to m_allTensors.
    void SetPad(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    // Registers the full default parameter set in index order:
    // kernel size (2), stride (3), pad (4), activation (5).
    void SetPadParam();

public:
    MaxPoolBuilder m_builder;
    std::vector<uint32_t> m_inputs{0};           // indices of input tensors
    std::vector<uint32_t> m_outputs{1};          // indices of output tensors
    std::vector<uint32_t> m_params{2, 3, 4, 5};  // indices of parameter tensors
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_pad_dim{4};
    std::vector<int32_t> m_param_dim{};          // scalar parameter (empty shape)
};
48
// No per-test initialization needed; common state lives in the OpsTest base.
void MaxPoolPadBuilderTest::SetUp() {}
50
// No per-test cleanup needed; common teardown lives in the OpsTest base.
void MaxPoolPadBuilderTest::TearDown() {}
52
SetPad(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)53 void MaxPoolPadBuilderTest::SetPad(OH_NN_DataType dataType,
54 const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
55 {
56 int32_t padNum{4};
57 std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
58 int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0};
59 EXPECT_NE(nullptr, padValue);
60
61 tensor->SetBuffer(padValue, sizeof(int64_t) * padNum);
62 m_allTensors.emplace_back(tensor);
63 }
64
// Registers all four max-pool parameter tensors. NOTE: the call order is
// significant — each helper appends to m_allTensors, so this order must
// match the indices in m_params {2, 3, 4, 5}.
void MaxPoolPadBuilderTest::SetPadParam()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
}
72
/**
 * @tc.name: maxpool_build_pad_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    // All inputs, outputs and parameters are valid: Build must succeed.
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
87
/**
 * @tc.name: maxpool_build_pad_002
 * @tc.desc: Verify the forbidden of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    // First build succeeds; a second build on the same builder is forbidden.
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
103
/**
 * @tc.name: maxpool_build_pad_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_003, TestSize.Level1)
{
    // No input tensor registered; indices shift down accordingly.
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
121
/**
 * @tc.name: maxpool_build_pad_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1)
{
    // No output tensor registered; indices shift down accordingly.
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
139
/**
 * @tc.name: maxpool_build_pad_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1)
{
    // Input index 6 is beyond the 6 registered tensors (0-5).
    m_inputs = {6};
    m_outputs = {1};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
157
/**
 * @tc.name: maxpool_build_pad_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_006, TestSize.Level1)
{
    // Output index 6 is beyond the 6 registered tensors (0-5).
    m_inputs = {0};
    m_outputs = {6};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
175
176 /**
177 * @tc.name: maxpool_build_pad_007
178 * @tc.desc: Verify the invalid kernelSize of the build function
179 * @tc.type: FUNC
180 */
181 HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_007, TestSize.Level1)
182 {
183 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
184 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
185
186 int32_t kernelsNum{2};
187 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
188 OH_NN_MAX_POOL_KERNEL_SIZE);
189 int32_t* valueKernelSize = new (std::nothrow) int32_t[kernelsNum]{1, 1};
190 EXPECT_NE(nullptr, valueKernelSize);
191
192 tensor->SetBuffer(valueKernelSize, sizeof(int32_t) * kernelsNum);
193 m_allTensors.emplace_back(tensor);
194
195 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
196 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
197 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
198 m_paramsIndex = m_params;
199 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
200 }
201
202 /**
203 * @tc.name: maxpool_build_pad_008
204 * @tc.desc: Verify the invalid stride of the build function
205 * @tc.type: FUNC
206 */
207 HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_008, TestSize.Level1)
208 {
209 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
210 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
211 m_paramsIndex = m_params;
212
213 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
214 int32_t strideNum{2};
215 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
216 int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
217 EXPECT_NE(nullptr, strideValue);
218
219 tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
220 m_allTensors.emplace_back(tensor);
221 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
222 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
223 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
224 }
225
226 /**
227 * @tc.name: maxpool_build_pad_009
228 * @tc.desc: Verify the invalid pad of the build function
229 * @tc.type: FUNC
230 */
231 HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_009, TestSize.Level1)
232 {
233 m_paramsIndex = m_params;
234 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
235 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
236
237 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
238 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
239 int32_t padNum{4};
240 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
241 int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0};
242 EXPECT_NE(nullptr, padValue);
243
244 tensor->SetBuffer(padValue, sizeof(int32_t) * padNum);
245 m_allTensors.emplace_back(tensor);
246 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
247 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
248 }
249
250
251 /**
252 * @tc.name: maxpool_build_pad_010
253 * @tc.desc: Verify the invalid activation of the build function
254 * @tc.type: FUNC
255 */
256 HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_010, TestSize.Level1)
257 {
258 m_paramsIndex = m_params;
259 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
260 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
261
262 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
263 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
264 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
265 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
266 OH_NN_MAX_POOL_ACTIVATION_TYPE);
267 int32_t* activationValue = new (std::nothrow) int32_t(0);
268 EXPECT_NE(nullptr, activationValue);
269
270 tensor->SetBuffer(activationValue, sizeof(int32_t));
271 m_allTensors.emplace_back(tensor);
272 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
273 }
274
275 /**
276 * @tc.name: maxpool_build_pad_011
277 * @tc.desc: Verify the activation scalar length of the build function
278 * @tc.type: FUNC
279 */
280 HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1)
281 {
282 m_param_dim = {2};
283 m_paramsIndex = m_params;
284 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
285 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
286
287 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
288 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
289 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
290 int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
291 EXPECT_NE(nullptr, activationValue);
292
293 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
294 OH_NN_MAX_POOL_ACTIVATION_TYPE);
295 tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
296 m_allTensors.emplace_back(tensor);
297 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
298 }
299
/**
 * @tc.name: maxpool_build_pad_012
 * @tc.desc: Verify the maxpool without set kernelsize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    // Kernel-size tensor is registered without a data buffer.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr,
        OH_NN_MAX_POOL_KERNEL_SIZE);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
320
/**
 * @tc.name: maxpool_build_pad_013
 * @tc.desc: Verify the maxpool without set stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    // Stride tensor is registered without a data buffer.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    m_allTensors.emplace_back(tensor);

    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
340
/**
 * @tc.name: maxpool_build_pad_014
 * @tc.desc: Verify the maxpool without set pad of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    // Pad tensor is registered without a data buffer.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
360
/**
 * @tc.name: maxpool_build_pad_015
 * @tc.desc: Verify the maxpool without set activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    // Activation tensor is registered without a data buffer.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    m_allTensors.emplace_back(tensor);

    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
381
382 /**
383 * @tc.name: maxpool_getprimitive_pad_001
384 * @tc.desc: Verify the behavior of the GetPrimitive function
385 * @tc.type: FUNC
386 */
387 HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_001, TestSize.Level1)
388 {
389 m_paramsIndex = m_params;
390 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
391 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
392
393 SetPadParam();
394 EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
395 LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
396 LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
397 EXPECT_NE(expectPrimitive, primitive);
398
399 std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
400 std::vector<int64_t> kernelSizeValueTest{1, 1};
401 EXPECT_EQ(kernelSizeValueTest, expectKernelSize);
402
403 std::vector<int64_t> expectStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
404 std::vector<int64_t> strideValueTest{1, 1};
405 std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_MaxPoolFusion_GetPad(primitive.get());
406 std::vector<int64_t> padValueValueTest{0, 0, 0, 0};
407 EXPECT_EQ(padValueValueTest, expectPadValue);
408
409 int8_t activationValue = 0;
410 int expectActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
411 EXPECT_EQ(activationValue, expectActivation);
412 }
413
/**
 * @tc.name: maxpool_getprimitive_pad_002
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    // Build() was never called, so GetPrimitive must return a null primitive.
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
430 } // namespace UnitTest
431 } // namespace NeuralNetworkRuntime
432 } // namespace OHOS
433