/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frameworks/native/ops/argmax_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class ArgMaxBuilderTest : public OpsTest {
public:
    void SetUp();
    void TearDown();

    void SetArgmaxAxis(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetArgmaxKeepdims(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);

public:
    ArgMaxBuilder m_builder;
    // Default tensor indices: input 0, output 1, parameters 2 (axis) and 3 (keepdims).
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3};
    std::vector<int32_t> m_input_dim{3, 3};
    std::vector<int32_t> m_output_dim{3, 3};
    std::vector<int32_t> m_param_dim{};
};

void ArgMaxBuilderTest::SetUp() {}

void ArgMaxBuilderTest::TearDown() {}

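// Creates an int64 axis parameter tensor (axis = 0), attaches the value as the tensor buffer,
// and appends the tensor to m_allTensors.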
void ArgMaxBuilderTest::SetArgmaxAxis(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int64_t* axisValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, axisValue);
    tensor->SetBuffer(axisValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
}

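// Creates a bool keepdims parameter tensor (keepdims = false), attaches the value as the
// tensor buffer, and appends the tensor to m_allTensors.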
void ArgMaxBuilderTest::SetArgmaxKeepdims(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* keepdimsValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, keepdimsValue);
    tensor->SetBuffer(keepdimsValue, sizeof(bool));
    m_allTensors.emplace_back(tensor);
}

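// Unless a test states otherwise, it registers input tensor 0, output tensor 1, and the
// parameter tensors 2 (axis) and 3 (keepdims) before calling Build.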
/**
 * @tc.name: argmax_build_001
 * @tc.desc: Verify that the Build function succeeds when all inputs, outputs and parameters are valid
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);

    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_002
 * @tc.desc: Verify that a second call to the Build function returns OH_NN_OPERATION_FORBIDDEN
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_002, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {1};
    m_params = {2, 3};

    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_003
 * @tc.desc: Verify that the Build function fails when the input tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {1};
    m_params = {2, 3};

    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_004
 * @tc.desc: Verify that the Build function fails when the output tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2};

    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_005
 * @tc.desc: Verify that the Build function fails when the input index is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_005, TestSize.Level1)
{
    m_inputs = {6};
    m_outputs = {1};
    m_params = {2, 3};

    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_006
 * @tc.desc: Verify that the Build function fails when the output index is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {6};
    m_params = {2, 3};

    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_007
 * @tc.desc: Verify that the Build function rejects an axis parameter with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_FLOAT32, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    float* axisValueTest = new (std::nothrow) float(0);
    EXPECT_NE(nullptr, axisValueTest);

    tensor->SetBuffer(axisValueTest, sizeof(float));
    m_allTensors.emplace_back(tensor);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_008
 * @tc.desc: Verify that the Build function rejects a keepdims parameter with an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    int64_t* keepdimsValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, keepdimsValue);

    tensor->SetBuffer(keepdimsValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_009
 * @tc.desc: Verify that the Build function rejects a parameter that does not belong to the ArgMax operator
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int64_t* strideValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_010
 * @tc.desc: Verify that the Build function fails when the axis parameter buffer is not set
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    m_allTensors.emplace_back(tensor);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_build_011
 * @tc.desc: Verify that the Build function fails when the keepdims parameter buffer is not set
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: argmax_getprimitive_001
 * @tc.desc: Verify that GetPrimitive returns the expected primitive after a successful Build
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));

    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);
    EXPECT_NE(nullptr, primitive);

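    // The helpers above set axis = 0 and keepdims = false; the primitive should report both.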
    int64_t returnValue = mindspore::lite::MindIR_ArgMaxFusion_GetAxis(primitive.get());
    EXPECT_EQ(returnValue, 0);
    bool keepdimsReturn = mindspore::lite::MindIR_ArgMaxFusion_GetKeepDims(primitive.get());
    EXPECT_EQ(keepdimsReturn, false);
}

/**
 * @tc.name: argmax_getprimitive_002
 * @tc.desc: Verify that GetPrimitive returns an empty primitive when Build has not been called
 * @tc.type: FUNC
 */
HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS