/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cmath>
#include <string>
#include <vector>
#include "common/common_test.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/context.h"

using namespace mindspore;

static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/add/add.mindir";
static const std::vector<float> input_data_1 = {1, 2, 3, 4};
static const std::vector<float> input_data_2 = {2, 3, 4, 5};

class TestAdd : public ST::Common {
 public:
  TestAdd() {}
};

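// End-to-end check for element-wise Add: load the MindIR model, run Predict on two
// float vectors, and verify that the inputs are preserved and that each output element
// equals input_data_1[i] + input_data_2[i].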
TEST_F(TestAdd, InferMindIR) {
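  // ContextAutoSet() comes from common/common_test.h; it prepares the execution context
  // for the test (device settings are expected to be taken from the test environment).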
  auto context = ContextAutoSet();

  Graph graph;
  ASSERT_TRUE(Serialization::Load(tensor_add_file, ModelType::kMindIR, &graph));
  Model tensor_add;
  ASSERT_TRUE(tensor_add.Build(GraphCell(graph), context) == kSuccess);

  // get model inputs
  std::vector<MSTensor> origin_inputs = tensor_add.GetInputs();
  ASSERT_EQ(origin_inputs.size(), 2);

  // prepare input: wrap the host buffers as MSTensors carrying the model's expected name, type and shape
  std::vector<MSTensor> outputs;
  std::vector<MSTensor> inputs;
  inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(),
                      input_data_1.data(), sizeof(float) * input_data_1.size());
  inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(),
                      input_data_2.data(), sizeof(float) * input_data_2.size());

  // infer
  ASSERT_TRUE(tensor_add.Predict(inputs, &outputs) == kSuccess);

  // assert input: the inputs held by the model after Predict should still match the original data
  inputs = tensor_add.GetInputs();
  ASSERT_EQ(inputs.size(), 2);
  auto after_input_data_1 = inputs[0].Data();
  auto after_input_data_2 = inputs[1].Data();
  const float *p = reinterpret_cast<const float *>(after_input_data_1.get());
  for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) {
    ASSERT_LE(std::abs(p[i] - input_data_1[i]), 1e-4);
  }
  p = reinterpret_cast<const float *>(after_input_data_2.get());
  for (size_t i = 0; i < inputs[1].DataSize() / sizeof(float); ++i) {
    ASSERT_LE(std::abs(p[i] - input_data_2[i]), 1e-4);
  }

  // assert output: each element should equal the element-wise sum of the two inputs
  for (auto &buffer : outputs) {
    auto buffer_data = buffer.Data();
    p = reinterpret_cast<const float *>(buffer_data.get());
    for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) {
      ASSERT_LE(std::abs(p[i] - (input_data_1[i] + input_data_2[i])), 1e-4);
    }
  }
}