1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "frameworks/native/ops/avgpool_builder.h"
17
18 #include "ops_test.h"
19
20 using namespace testing;
21 using namespace testing::ext;
22 using namespace OHOS::NeuralNetworkRuntime::Ops;
23
24 namespace OHOS {
25 namespace NeuralNetworkRuntime {
26 namespace UnitTest {
// Fixture for AvgPoolBuilder tests exercising the explicit-pad parameter path
// (OH_NN_AVG_POOL_PAD carrying four padding values).
class AvgPoolPadBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    // Appends a pad parameter tensor holding four zero paddings to m_allTensors.
    void SetPad(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    // Appends the four AvgPool parameter tensors: kernel size, stride, pad, activation.
    void SetPadParams();

public:
    AvgPoolBuilder m_builder;
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};   // input tensor dimensions
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};  // output tensor dimensions
    std::vector<int32_t> m_kenelsize_dim{2};        // NOTE(review): "kenelsize" is a typo of "kernelsize"; renaming touches every test, so left as-is
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_pad_dim{4};
    std::vector<int32_t> m_param_dim{};             // empty dims: scalar parameter (activation type)
    std::vector<uint32_t> m_inputs{0};              // default input tensor indices
    std::vector<uint32_t> m_outputs{1};             // default output tensor indices
    std::vector<uint32_t> m_params{2, 3, 4, 5};     // default parameter tensor indices
};
48
// No per-test initialization is needed; the fixture members carry the defaults.
void AvgPoolPadBuilderTest::SetUp() {}
50
// No per-test cleanup is needed here; tensor teardown is handled by the base fixture.
void AvgPoolPadBuilderTest::TearDown() {}
52
SetPad(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)53 void AvgPoolPadBuilderTest::SetPad(OH_NN_DataType dataType,
54 const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
55 {
56 int32_t padNum{4};
57 std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
58 int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0};
59 EXPECT_NE(nullptr, padValue);
60 tensor->SetBuffer(padValue, sizeof(int64_t) * padNum);
61 m_allTensors.emplace_back(tensor);
62 }
63
// Registers the four AvgPool parameter tensors. The call order matters: each
// helper appends one tensor to m_allTensors, so kernel size, stride, pad and
// activation land at the indices listed in m_params (after input and output).
void AvgPoolPadBuilderTest::SetPadParams()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
}
71
/**
 * @tc.name: avgpool_build_pad_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_001, TestSize.Level1)
{
    // Happy path: input tensor 0, output tensor 1, params at indices 2-5, all valid.
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetPadParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
85
/**
 * @tc.name: avgpool_build_pad_002
 * @tc.desc: Verify the forbidden of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetPadParams();
    // First build succeeds; a second Build() on the same builder must be rejected.
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
100
/**
 * @tc.name: avgpool_build_pad_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_003, TestSize.Level1)
{
    // No input index at all; remaining indices shift down by one.
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
119
/**
 * @tc.name: avgpool_build_pad_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1)
{
    // No output index at all; Build() must report an invalid parameter.
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
138
/**
 * @tc.name: avgpool_build_pad_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1)
{
    // Only 6 tensors are registered (indices 0-5), so input index 6 is out of bounds.
    m_inputs = {6};
    m_outputs = {1};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
157
/**
 * @tc.name: avgpool_build_pad_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_006, TestSize.Level1)
{
    // Only 6 tensors are registered (indices 0-5), so output index 6 is out of bounds.
    m_inputs = {0};
    m_outputs = {6};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
176
177 /**
178 * @tc.name: avgpool_build_pad_007
179 * @tc.desc: Verify the invalid kernelSize of the build function
180 * @tc.type: FUNC
181 */
182 HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_007, TestSize.Level1)
183 {
184 m_paramsIndex = m_params;
185 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
186 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
187
188 int32_t numKernels{2};
189 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
190 OH_NN_AVG_POOL_KERNEL_SIZE);
191 int32_t* kernelSizeValue = new (std::nothrow) int32_t[numKernels]{1, 1};
192 EXPECT_NE(nullptr, kernelSizeValue);
193 tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * numKernels);
194 m_allTensors.emplace_back(tensor);
195
196 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
197 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
198 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
199 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
200 }
201
202 /**
203 * @tc.name: avgpool_build_pad_008
204 * @tc.desc: Verify the invalid stride of the build function
205 * @tc.type: FUNC
206 */
207 HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_008, TestSize.Level1)
208 {
209 m_paramsIndex = m_params;
210 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
211 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
212 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
213
214 int32_t numStride{2};
215 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
216 int32_t* strideValue = new (std::nothrow) int32_t[numStride]{1, 1};
217 EXPECT_NE(nullptr, strideValue);
218 tensor->SetBuffer(strideValue, sizeof(int32_t) * numStride);
219 m_allTensors.emplace_back(tensor);
220
221 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
222 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
223 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
224 }
225
226 /**
227 * @tc.name: avgpool_build_pad_009
228 * @tc.desc: Verify the invalid pad of the build function
229 * @tc.type: FUNC
230 */
231 HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_009, TestSize.Level1)
232 {
233 m_paramsIndex = m_params;
234 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
235 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
236
237 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
238 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
239 int32_t padNum{4};
240 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
241 int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0};
242 EXPECT_NE(nullptr, padValue);
243
244 tensor->SetBuffer(padValue, sizeof(int32_t) * padNum);
245 m_allTensors.emplace_back(tensor);
246 SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
247 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
248 }
249
250
251 /**
252 * @tc.name: avgpool_build_pad_010
253 * @tc.desc: Verify the invalid activation of the build function
254 * @tc.type: FUNC
255 */
256 HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_010, TestSize.Level1)
257 {
258 m_paramsIndex = m_params;
259 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
260 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
261
262 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
263 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
264 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
265 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
266 OH_NN_AVG_POOL_ACTIVATION_TYPE);
267 int32_t* activationValue = new (std::nothrow) int32_t(0);
268 EXPECT_NE(nullptr, activationValue);
269
270 tensor->SetBuffer(activationValue, sizeof(int32_t));
271 m_allTensors.emplace_back(tensor);
272 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
273 }
274
275 /**
276 * @tc.name: avgpool_build_pad_011
277 * @tc.desc: Verify the activation scalar length of the build function
278 * @tc.type: FUNC
279 */
280 HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1)
281 {
282 m_param_dim = {2};
283 m_paramsIndex = m_params;
284 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
285 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
286
287 SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
288 SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
289 SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
290 int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
291 EXPECT_NE(nullptr, activationValue);
292
293 std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
294 OH_NN_AVG_POOL_ACTIVATION_TYPE);
295 tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
296 m_allTensors.emplace_back(tensor);
297 EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
298 }
299
/**
 * @tc.name: avgpool_build_pad_012
 * @tc.desc: Verify the avgpool without set kernelsize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    // Kernel-size tensor is registered without calling SetBuffer, so it
    // carries no data and Build() must report an invalid parameter.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr,
        OH_NN_AVG_POOL_KERNEL_SIZE);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
320
/**
 * @tc.name: avgpool_build_pad_013
 * @tc.desc: Verify the avgpool without set stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    // Stride tensor is registered without a buffer (no SetBuffer call), so
    // Build() must report an invalid parameter.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    m_allTensors.emplace_back(tensor);

    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
340
/**
 * @tc.name: avgpool_build_pad_014
 * @tc.desc: Verify the avgpool without set pad of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    // Pad tensor is registered without a buffer (no SetBuffer call), so
    // Build() must report an invalid parameter.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
360
/**
 * @tc.name: avgpool_build_pad_015
 * @tc.desc: Verify the avgpool without set activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);

    // Activation tensor is registered without a buffer (no SetBuffer call),
    // so Build() must report an invalid parameter.
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}
381
382 /**
383 * @tc.name: avgpool_getprimitive_pad_001
384 * @tc.desc: Verify the behavior of the GetPrimitive function
385 * @tc.type: FUNC
386 */
387 HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_001, TestSize.Level1)
388 {
389 m_paramsIndex = m_params;
390 SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
391 SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
392
393 SetPadParams();
394 EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
395 LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
396 LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
397 EXPECT_NE(expectPrimitive, primitive);
398
399 std::vector<int64_t> expetctKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get());
400 std::vector<int64_t> kernelSizeValueTest{1, 1};
401 EXPECT_EQ(kernelSizeValueTest, expetctKernelSize);
402 std::vector<int64_t> expetctStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get());
403 std::vector<int64_t> strideValueTest{1, 1};
404 std::vector<int64_t> expetctPadValue = mindspore::lite::MindIR_AvgPoolFusion_GetPad(primitive.get());
405 std::vector<int64_t> padValueValueTest{0, 0, 0, 0};
406 EXPECT_EQ(padValueValueTest, expetctPadValue);
407
408 int8_t activationValue = 0;
409 int expectActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get());
410 EXPECT_EQ(activationValue, expectActivation);
411 }
412
/**
 * @tc.name: avgpool_getprimitive_pad_002
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    // Build() was never called, so GetPrimitive() must return the empty (null) primitive.
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
429 } // namespace UnitTest
430 } // namespace NeuralNetworkRuntime
431 } // namespace OHOS
432