/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frameworks/native/ops/matmul_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class MatMulBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

protected:
    void SaveTransposeATensor(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SaveTransposeBTensor(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SaveActivationTensor(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetInputTensor(std::shared_ptr<NNTensor> inputTensor);

protected:
    MatmulBuilder m_matmul;
    std::vector<uint32_t> m_inputs {0, 1};
    std::vector<uint32_t> m_outputs {2};
    std::vector<uint32_t> m_params {3, 4, 5};
    std::vector<int32_t> m_inputXDim {1, 1, 3, 2};
    std::vector<int32_t> m_inputYDim {1, 1, 2, 3};
    std::vector<int32_t> m_outputDim {1, 1, 3, 3};
    std::vector<int32_t> m_paramDim {};
    std::shared_ptr<NNTensor> m_inputTensor {};
};

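// SetUp and TearDown are left empty: each test case below assembles its own tensor list.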
void MatMulBuilderTest::SetUp() {}

void MatMulBuilderTest::TearDown() {}

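// Helper: creates the transposeA parameter tensor (a scalar OH_NN_BOOL set to false) and appends it to m_allTensors.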
void MatMulBuilderTest::SaveTransposeATensor(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> transposeATensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* transposeAValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, transposeAValue);
    transposeATensor->SetBuffer(transposeAValue, sizeof(bool));
    m_allTensors.emplace_back(transposeATensor);
}

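// Helper: same as SaveTransposeATensor, but for the transposeB parameter.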
void MatMulBuilderTest::SaveTransposeBTensor(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> transposeBTensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* transposeBValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, transposeBValue);
    transposeBTensor->SetBuffer(transposeBValue, sizeof(bool));
    m_allTensors.emplace_back(transposeBTensor);
}

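// Helper: creates the activation-type parameter tensor (a scalar OH_NN_INT8 set to 0) and appends it to m_allTensors.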
void MatMulBuilderTest::SaveActivationTensor(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> activationTensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* activationValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValue);
    activationTensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(activationTensor);
}

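// Helper: registers the two matmul input tensors (shapes m_inputXDim and m_inputYDim) in m_allTensors;
// the inputTensor argument is only reused as local scratch storage.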
void MatMulBuilderTest::SetInputTensor(std::shared_ptr<NNTensor> inputTensor)
{
    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputXDim, nullptr, OH_NN_TENSOR);
    m_allTensors.emplace_back(inputTensor);

    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputYDim, nullptr, OH_NN_TENSOR);
    m_allTensors.emplace_back(inputTensor);
}

/**
 * @tc.name: matmul_build_001
 * @tc.desc: Verify that the build function returns a successful message.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_001, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}

/**
 * @tc.name: matmul_build_002
 * @tc.desc: Verify that the build function returns a failed message when Build is called twice (m_isBuild is true).
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_002, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors));
    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

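// The remaining build tests reuse the same setup but perturb one aspect at a time
// (tensor indices, parameter dataType, dimension, value, or a missing buffer) and expect OH_NN_INVALID_PARAMETER.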
/**
 * @tc.name: matmul_build_003
 * @tc.desc: Verify that the build function returns a failed message with invalid inputs.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_003, TestSize.Level0)
{
    m_inputs = {0, 1, 2};
    m_outputs = {3};
    m_params = {4, 5, 6};

    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_004
 * @tc.desc: Verify that the build function returns a failed message with invalid outputs.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_004, TestSize.Level0)
{
    m_outputs = {2, 3};
    m_params = {4, 5, 6};

    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_005
 * @tc.desc: Verify that the build function returns a failed message with an empty m_allTensors.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_005, TestSize.Level0)
{
    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_006
 * @tc.desc: Verify that the build function returns a failed message without an output tensor.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_006, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_007
 * @tc.desc: Verify that the build function returns a failed message with an invalid dataType for transposeA.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_007, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeATensor;
    transposeATensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    int32_t transposeAValue = 1;
    transposeATensor->SetBuffer(&transposeAValue, sizeof(transposeAValue));
    m_allTensors.emplace_back(transposeATensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    transposeATensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_008
 * @tc.desc: Verify that the build function returns a failed message with an invalid dimension for transposeA.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_008, TestSize.Level0)
{
    std::vector<int32_t> expectParamDim = {2};

    SetInputTensor(m_inputTensor);

    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeATensor;
    transposeATensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    bool transposeAValue[2] = {false, false};
    transposeATensor->SetBuffer(transposeAValue, 2 * sizeof(bool));
    m_allTensors.emplace_back(transposeATensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    transposeATensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_009
 * @tc.desc: Verify that the build function returns a failed message with an invalid dataType for transposeB.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_009, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);

    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeBTensor;
    transposeBTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    int32_t transposeBValue = 1;
    transposeBTensor->SetBuffer(&transposeBValue, sizeof(transposeBValue));
    m_allTensors.emplace_back(transposeBTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    transposeBTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_010
 * @tc.desc: Verify that the build function returns a failed message with an invalid dimension for transposeB.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_010, TestSize.Level0)
{
    std::vector<int32_t> expectParamDim = {2};

    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeBTensor;
    transposeBTensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    bool transposeBValue[2] = {false, false};
    transposeBTensor->SetBuffer(transposeBValue, 2 * sizeof(bool));
    m_allTensors.emplace_back(transposeBTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    transposeBTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_011
 * @tc.desc: Verify that the build function returns a failed message with an invalid dataType for activation.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_011, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);

    std::shared_ptr<NNTensor> activationTensor;
    activationTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);
    bool activationValue = false;
    activationTensor->SetBuffer(&activationValue, sizeof(activationValue));
    m_allTensors.emplace_back(activationTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    activationTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_012
 * @tc.desc: Verify that the build function returns a failed message with an invalid dimension for activation.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_012, TestSize.Level0)
{
    std::vector<int32_t> expectParamDim = {2};

    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);

    std::shared_ptr<NNTensor> activationTensor;
    activationTensor = TransToNNTensor(OH_NN_INT8, expectParamDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);
    int8_t activationValue[2] = {0, 1};
    activationTensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(activationTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    activationTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_013
 * @tc.desc: Verify that the build function returns a failed message with an invalid activation value.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_013, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);

    std::shared_ptr<NNTensor> activationTensor;
    activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);
    int8_t activationValue = -1;
    activationTensor->SetBuffer(&activationValue, sizeof(activationValue));
    m_allTensors.emplace_back(activationTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    activationTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: matmul_build_014
 * @tc.desc: Verify that the build function returns a failed message when an unsupported parameter type is passed.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_014, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_015
 * @tc.desc: Verify that the build function returns a failed message when no buffer is set for transposeA.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_015, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeATensor;
    transposeATensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    m_allTensors.emplace_back(transposeATensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_016
 * @tc.desc: Verify that the build function returns a failed message when no buffer is set for transposeB.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_016, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> transposeBTensor;
    transposeBTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    m_allTensors.emplace_back(transposeBTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: matmul_build_017
 * @tc.desc: Verify that the build function returns a failed message when no buffer is set for activation.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_build_017, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);

    std::shared_ptr<NNTensor> activationTensor;
    activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);
    m_allTensors.emplace_back(activationTensor);

    OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

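// GetPrimitive tests: after a successful Build, the returned primitive should expose the transposeA,
// transposeB, and activation values configured above; without Build, GetPrimitive should yield a null primitive.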
/**
 * @tc.name: matmul_getprimitive_001
 * @tc.desc: Verify that the getPrimitive function returns a successful message.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_getprimitive_001, TestSize.Level0)
{
    SetInputTensor(m_inputTensor);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
    SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B);
    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE);

    bool transposeAValue = false;
    bool transposeBValue = false;
    int8_t activationValue = 0;
    EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors));
    LiteGraphPrimitvePtr primitive = m_matmul.GetPrimitive();
    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
    EXPECT_NE(expectPrimitive, primitive);

    auto returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeA(primitive.get());
    EXPECT_EQ(returnValue, transposeAValue);
    returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeB(primitive.get());
    EXPECT_EQ(returnValue, transposeBValue);
    returnValue = mindspore::lite::MindIR_MatMulFusion_GetActivationType(primitive.get());
    EXPECT_EQ(returnValue, activationValue);
}

/**
 * @tc.name: matmul_getprimitive_002
 * @tc.desc: Verify that the getPrimitive function returns a failed message when Build has not been called.
 * @tc.type: FUNC
 */
HWTEST_F(MatMulBuilderTest, matmul_getprimitive_002, TestSize.Level0)
{
    MatmulBuilder matmul;
    LiteGraphPrimitvePtr primitive = matmul.GetPrimitive();
    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS