/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "ir/dtype.h"
#include "pybind_api/ir/tensor_py.h"
#include "transform/transform_base_test.h"
#include "common/py_func_graph_fetcher.h"
#include "pipeline/jit/static_analysis/static_analysis.h"
#include "frontend/operator/ops.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/convert.h"
#include "utils/utils.h"

#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif

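// 'private' is redefined to 'public' before including graph_runner.h so this
// test file can reach GraphRunner's otherwise-private internals (white-box testing).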
#define private public
#include "transform/graph_ir/graph_runner.h"

using mindspore::tensor::TensorPy;

namespace mindspore {
namespace transform {
class TestGraphRunner : public UT::Common {
 public:
  TestGraphRunner() {}
  void SetUp();
  static const std::shared_ptr<Float> kF64;
  static const std::shared_ptr<Float> kF32;

 private:
};

void TestGraphRunner::SetUp() { UT::InitPythonPath(); }
const std::shared_ptr<Float> TestGraphRunner::kF64 = std::make_shared<Float>(64);
const std::shared_ptr<Float> TestGraphRunner::kF32 = std::make_shared<Float>(32);

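// Builds a small func graph around a Conv2D primitive (carrying the attributes a
// real Conv2D would have), registers it with a graph manager, and returns a
// DfGraphConvertor for it; the tests below use this to produce a GE graph.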
std::shared_ptr<DfGraphConvertor> MakeGeGraph() {
  PrimitivePtr conv2d = prim::kPrimConv2D;
  conv2d->AddAttr("stride", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
  conv2d->AddAttr("pad_mode", MakeValue(std::string("pad")));
  conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("group", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("out_channel", MakeValue(static_cast<int64_t>(2)));
  conv2d->AddAttr("kernel_size", MakeValue(std::vector<int64_t>({2, 2})));
  conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("data_format", MakeValue(kOpFormat_NCHW));

  FuncGraphPtr anf_graph = MakeFuncGraph(conv2d, 2);
  std::shared_ptr<FuncGraphManager> ir_graph_manager = MakeManager({anf_graph});

  return std::make_shared<DfGraphConvertor>(anf_graph);
}
namespace {
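// Converts the ME input tensors to GE tensors, runs the graph registered as
// "fp_bp_subgraph" through a GraphRunner, and converts the GE outputs back to
// ME tensors with the shapes in request_dims; returns nullptr on failure.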
std::shared_ptr<std::vector<MeTensorPtr>> DoExecGraph(const std::vector<MeTensorPtr> &inputs) {
  std::vector<GeTensorPtr> ge_tensor_ptrs = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);

  std::vector<GeTensorPtr> ge_outputs;
  transform::GraphRunnerOptions options;
  transform::GraphRunner graph_runner(options);
  transform::RunOptions run_options;
  run_options.name = "fp_bp_subgraph";

  MS_LOG(INFO) << "Run func_graph begin, inputs size is: " << inputs.size();
  Status ret = graph_runner.RunGraph(run_options, ge_tensor_ptrs, &ge_outputs);
  MS_LOG(INFO) << "Run func_graph finish, outputs size is: " << ge_outputs.size();
  if (ret != Status::SUCCESS) {
    return nullptr;
  }

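  // The shapes the GE outputs are reshaped to when converted back to ME tensors.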
  std::vector<std::vector<int64_t>> request_dims;
  std::vector<int64_t> dims1 = {1, 1, 4, 4};
  std::vector<int64_t> dims2 = {2, 3, 4, 5};
  std::vector<int64_t> dims3 = {9, 9};
  request_dims.emplace_back(dims1);
  request_dims.emplace_back(dims2);
  request_dims.emplace_back(dims3);

  std::vector<MeTensorPtr> me_outputs = TransformUtil::ConvertGeTensors(ge_outputs, request_dims);

  return std::make_shared<std::vector<MeTensorPtr>>(me_outputs);
}

}  // namespace

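// Builds the same 1x2x3 float32 tensor in two ways (from a raw buffer via MeTensor,
// and from a Python tuple via TensorPy::MakeTensor) and checks that both
// round-trip to numpy with identical bytes.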
TEST_F(TestGraphRunner, TestGeTensorConstructor) {
  // Init a data buffer
  float ge_tensor_data[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};

  // Create a Tensor with wanted data type and shape
  MeTensor tensor = MeTensor(TypeId::kNumberTypeFloat32, std::vector<int64_t>({1, 2, 3}));

  // Get the writable data pointer from the tensor
  float *me_tensor_data = reinterpret_cast<float *>(tensor.data_c());

  // Copy data from buffer to tensor's data
  memcpy_s(me_tensor_data, static_cast<size_t>(tensor.data().nbytes()), ge_tensor_data, sizeof(ge_tensor_data));
  PrintMeTensor(&tensor);

  std::cout << "----------------------------------" << std::endl;
  py::tuple py_tuple =
    py::make_tuple(py::make_tuple(py::make_tuple(1.1f, 2.2f, 3.3f), py::make_tuple(4.4f, 5.5f, 6.6f)));
  py::array my_array = py::array(py_tuple).attr("astype").cast<py::function>()("float32").cast<py::array>();
  auto tensor_tuple = TensorPy::MakeTensor(my_array, kFloat32);
  PrintMeTensor(tensor_tuple.get());

  py::array tensor_array = TensorPy::AsNumpy(tensor);
  py::array tensor_tuple_array = TensorPy::AsNumpy(*tensor_tuple);
  assert(memcmp(ge_tensor_data, tensor_array.data(), sizeof(ge_tensor_data)) == 0);
  assert(memcmp(ge_tensor_data, tensor_tuple_array.data(), sizeof(ge_tensor_data)) == 0);
}

#if (!defined ENABLE_GE)

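// RunGraph should fail when no graph name is set or when the graph manager is
// empty, and succeed once "test_graph" has been registered.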
TEST_F(TestGraphRunner, TestRunGraphException) {
  DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  graph_manager.ClearGraph();

  std::map<std::string, MeTensorPtr> dict;
  std::initializer_list<int64_t> list0{2, 1, 2, 2};
  MeTensorPtr init_tensor_ptr = MakeTensor(kF32, list0);
  dict["x1"] = init_tensor_ptr;

  std::shared_ptr<DfGraphConvertor> converter = MakeGeGraph();
  (*converter).ConvertAllNode().InitParam(dict).BuildGraph();
  auto df_graph = (*converter).GetComputeGraph();

  graph_manager.AddGraph("test_graph", df_graph);
  std::initializer_list<int64_t> list1{1, 1, 2, 3};
  MeTensorPtr me_tensor_ptr = MakeTensor(kF32, list1);

  std::initializer_list<int64_t> list2{1, 1, 4, 4};
  MeTensorPtr input_ptr = MakeTensor(kF32, list2);
  std::vector<MeTensorPtr> me_inputs;
  me_inputs.emplace_back(input_ptr);
  std::vector<MeTensorPtr> me_outputs;

  GraphRunnerOptions options;
  GraphRunner graph_runner(options);
  RunOptions run_options;
  ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
  run_options.name = "test_graph";
  ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);

  GraphRunner graph_runner2(options);
  ASSERT_TRUE(graph_runner2.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);

  // when the GraphManager is empty
  graph_manager.ClearGraph();
  GraphRunner graph_runner3(options);
  ASSERT_TRUE(graph_runner3.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
}

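// Registers the Conv2D graph as "test_graph", runs it on a float32 input built
// from a nested Python tuple, expects RunGraph to succeed, and prints the outputs.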
TEST_F(TestGraphRunner, TestRunGraph) {
  DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  graph_manager.ClearGraph();

  std::shared_ptr<DfGraphConvertor> converter = MakeGeGraph();
  std::map<std::string, MeTensorPtr> dict;
  std::initializer_list<int64_t> list0{2, 1, 2, 2};
  dict.emplace("x1", MakeTensor(kF32, list0));

  (*converter).ConvertAllNode().InitParam(dict).BuildGraph();
  graph_manager.AddGraph("test_graph", (*converter).GetComputeGraph());

  TypePtr type_id = kFloat32;

  py::tuple tuple = py::make_tuple(
    py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))),
    py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))));
  py::array array = py::array(tuple);
  MeTensorPtr me_tensor_ptr = TensorPy::MakeTensor(array, type_id);

  MS_LOG(INFO) << "inputs me tensor data is: ";
  PrintMeTensor(&(*me_tensor_ptr));

  std::vector<MeTensorPtr> me_inputs;
  me_inputs.emplace_back(me_tensor_ptr);
  std::vector<MeTensorPtr> me_outputs;

  GraphRunnerOptions options;
  GraphRunner graph_runner(options);
  RunOptions run_options;
  run_options.name = "test_graph";
  ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
  MS_LOG(INFO) << "outputs me tensor data is: ";
  for (size_t i = 0; i < me_outputs.size(); i++) {
    PrintMeTensor(&(*me_outputs[i]));
  }
}

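// End-to-end check of the helper above: registers the graph as "fp_bp_subgraph",
// hands three inputs to DoExecGraph, and expects a non-null result.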
TEST_F(TestGraphRunner, TestAPI) {
  DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  graph_manager.ClearGraph();

  std::shared_ptr<DfGraphConvertor> converter = MakeGeGraph();
  std::map<std::string, MeTensorPtr> dict;
  std::initializer_list<int64_t> list0{2, 1, 2, 2};
  dict.emplace("x1", MakeTensor(kF32, list0));

  (*converter).ConvertAllNode().InitParam(dict).BuildGraph();
  graph_manager.AddGraph("fp_bp_subgraph", (*converter).GetComputeGraph());

  std::initializer_list<int64_t> list1{1, 1, 4, 4};
  std::initializer_list<int64_t> list2{2, 3, 4, 5};
  std::initializer_list<int64_t> list3{9, 9, 1, 1};
  MeTensorPtr input_ptr1 = MakeTensor(kF32, list1);
  MeTensorPtr input_ptr2 = MakeTensor(kF32, list2);
  MeTensorPtr input_ptr3 = MakeTensor(kF32, list3);
  std::vector<MeTensorPtr> me_inputs;
  std::vector<MeTensorPtr> me_outputs;
  me_inputs.emplace_back(input_ptr1);
  me_inputs.emplace_back(input_ptr2);
  me_inputs.emplace_back(input_ptr3);

  auto ret = DoExecGraph(me_inputs);

  ASSERT_TRUE(ret != nullptr);

  me_outputs = *ret;
  MS_LOG(INFO) << "outputs me tensor data is: ";
  for (auto tensor : me_outputs) {
    PrintMeTensor(&(*tensor));
  }
}
#endif

}  // namespace transform
}  // namespace mindspore