/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pipeline/pynative/pynative_execute_ge.h"

#include <typeinfo>
#include <map>
#include <set>
#include <unordered_set>

#include "utils/any.h"
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "frontend/operator/ops.h"
#include "pipeline/jit/parse/data_converter.h"
#include "pipeline/jit/static_analysis/prim.h"
#include "backend/session/session_factory.h"
#include "pybind_api/ir/tensor_py.h"
#include "transform/graph_ir/op_declare/array_ops_declare.h"

using mindspore::tensor::TensorPy;

namespace mindspore {
namespace pynative {
const char SINGLE_OP_GRAPH[] = "single_op_graph";
using MeTensor = mindspore::tensor::Tensor;
using MeTensorPtr = mindspore::tensor::TensorPtr;
using GeOperator = ge::Operator;
using GeOperatorPtr = std::shared_ptr<GeOperator>;

using transform::GraphRunner;
using transform::GraphRunnerOptions;
using transform::OperatorPtr;
static std::shared_ptr<session::SessionBasic> session = nullptr;
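// Converts a Python object into a MindSpore ValuePtr; raises an exception if the conversion fails.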
inline ValuePtr PyAttrValue(const py::object &obj) {
  ValuePtr converted_ret = nullptr;
  bool converted = parse::ConvertData(obj, &converted_ret);
  if (!converted) {
    MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
  }
  return converted_ret;
}

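// Converts a Python object (Tensor, tuple, list, scalar or numpy array) into a MindSpore tensor.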
MeTensorPtr ConvertPyObjToTensor(const py::object &obj) {
  MeTensorPtr me_tensor_ptr = nullptr;
  if (py::isinstance<MeTensor>(obj)) {
    me_tensor_ptr = py::cast<MeTensorPtr>(obj);
  } else if (py::isinstance<py::tuple>(obj)) {
    me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::tuple>(obj)), nullptr);
  } else if (py::isinstance<py::float_>(obj)) {
    me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::float_>(obj)), nullptr);
  } else if (py::isinstance<py::int_>(obj)) {
    me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::int_>(obj)), nullptr);
  } else if (py::isinstance<py::list>(obj)) {
    me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::list>(obj)), nullptr);
  } else if (py::isinstance<py::array>(obj)) {
    me_tensor_ptr = TensorPy::MakeTensor(py::cast<py::array>(obj), nullptr);
  } else {
    MS_LOG(EXCEPTION) << "Run op inputs type is invalid!";
  }
  return me_tensor_ptr;
}

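// Wraps each non-null input GE tensor in a Constant operator, binds it to the corresponding
// input of `op` through the op adapter, and collects the constants as graph input nodes.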
bool SetInputsForSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs,
                               const OperatorPtr &op, std::vector<GeOperator> *graph_input_nodes) {
  MS_EXCEPTION_IF_NULL(op_exec_info);
  MS_EXCEPTION_IF_NULL(graph_input_nodes);
  auto op_inputs = op_exec_info->op_inputs;
  std::string op_name = op_exec_info->op_name;
  transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
  if (adapter == nullptr) {
    return false;
  }

  int64_t op_input_idx = 1;
  size_t size = inputs.size();
  for (size_t i = 0; i < size; i++) {
    if (inputs[i] == nullptr) {
      continue;
    }
    auto const_op = std::make_shared<transform::Constant>();
    MS_EXCEPTION_IF_NULL(const_op);
    (void)const_op->set_attr_value(*inputs[i]);
    MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
    MS_EXCEPTION_IF_NULL(me_tensor_ptr);
    auto const_op_desc =
      transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW);
    if (const_op_desc == nullptr) {
      MS_LOG(ERROR) << "Create variable " << op_name << " output descriptor failed!";
      return false;
    }
    auto pointer_cast_const_op = std::static_pointer_cast<transform::Constant>(const_op);
    MS_EXCEPTION_IF_NULL(pointer_cast_const_op);
    (void)pointer_cast_const_op->update_output_desc_y(*const_op_desc);
    auto &input_map = adapter->getInputMap();
    if (input_map.find(op_input_idx) == input_map.end()) {
      continue;
    }
    if (adapter->setInput(op, op_input_idx++, const_op)) {
      MS_LOG(ERROR) << "Failed to set params, index is " << op_input_idx;
      return false;
    }
    graph_input_nodes->push_back(*const_op);
  }
  return true;
}

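// Builds a single-operator GE graph: generates the operator from its adapter, wires up the
// input constants, sets user-provided, default and input-derived attributes, and then fixes
// the graph's input and output nodes.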
bool BuildSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs,
                        const std::unordered_map<std::string, ValuePtr> &attrs, const GeGraphPtr &graph) {
  MS_EXCEPTION_IF_NULL(op_exec_info);
  std::string op_name = op_exec_info->op_name;
  auto op_inputs = op_exec_info->op_inputs;
  transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Unable to find Adapter for " << ((std::string)py::str(op_name));
    return false;
  }
  OperatorPtr op = adapter->generate(op_name);
  MS_EXCEPTION_IF_NULL(op);

  std::vector<GeOperator> graph_input_nodes;
  // hold param nodes after setting input and output for the graph
  // set input
  if (!SetInputsForSingleOpGraph(op_exec_info, inputs, op, &graph_input_nodes)) {
    return false;
  }
  // set attributes
  for (auto attr : attrs) {
    (void)adapter->setAttr(op, attr.first, attr.second);
  }
  // set default attributes
  auto extra_attrs = adapter->GetExtraAttr();
  for (auto attr : extra_attrs) {
    (void)adapter->setAttr(op, attr.first, attr.second);
  }
  // set input attributes
  auto &input_attr_map = adapter->getInputAttrMap();
  for (auto &it : input_attr_map) {
    if (op_inputs.size() < it.first) {
      continue;
    }
    auto const_value = PyAttrValue(op_inputs[it.first - 1]);
    if (const_value->isa<None>()) {
      continue;
    }
    it.second.set_attr(op, const_value);
  }
  // construct output data nodes
  std::vector<GeOperator> graph_outputs{*op};
  // set input and output nodes for the graph
  MS_EXCEPTION_IF_NULL(graph);
  (void)graph->SetInputs(graph_input_nodes).SetOutputs(graph_outputs);
  MS_LOG(INFO) << "BuildSingleOpGraph done";
  return true;
}

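// Converts the Python op inputs into GE tensors; None inputs are recorded as nullptr
// placeholders so that positions stay aligned with op_inputs.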
void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector<GeTensorPtr> *const inputs) {
  MS_EXCEPTION_IF_NULL(inputs);
  MS_EXCEPTION_IF_NULL(op_exec_info);
  auto op_inputs = op_exec_info->op_inputs;
  size_t size = op_inputs.size();
  for (size_t i = 0; i < size; i++) {
    if (py::isinstance<py::none>(op_inputs[i])) {
      inputs->emplace_back(nullptr);
      continue;
    }
    MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
    auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW);
    if (ge_tensor_ptr == nullptr) {
      MS_LOG(EXCEPTION) << "Convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
    }
    // set inputs for operator to build single node graph
    inputs->push_back(ge_tensor_ptr);
  }
}

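// Converts the Python attribute dict into ValuePtr attributes, builds the single-op graph and
// registers it with the DfGraphManager under SINGLE_OP_GRAPH.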
PynativeStatusCode ConvertAttributes(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs) {
  MS_EXCEPTION_IF_NULL(op_exec_info);
  auto op_attrs = op_exec_info->op_attrs;
  std::unordered_map<std::string, ValuePtr> attrs{};

  for (auto &item : op_attrs) {
    if (!py::isinstance<py::str>(item.first)) {
      MS_LOG(ERROR) << "Type error in py dict convert";
      return PYNATIVE_OP_ATTRS_ERR;
    }
    std::string name = py::cast<std::string>(item.first);
    auto attr_value = PyAttrValue(py::cast<py::object>(item.second));
    (void)attrs.emplace(name, attr_value);
  }

  // build graph
  GeGraphPtr graph = std::make_shared<GeGraph>(op_exec_info->op_name);
  if (!BuildSingleOpGraph(op_exec_info, inputs, attrs, graph)) {
    MS_LOG(ERROR) << "Failed to BuildSingleOpGraph";
    return PYNATIVE_GRAPH_GE_BUILD_ERR;
  }

  // add the single op graph into the graph manager, which will be iterated by session.
  transform::Status ret =
    transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr<transform::DfGraph>(graph));
  if (ret != transform::SUCCESS) {
    MS_LOG(ERROR) << "Failed to AddGraph into graph manager";
    return PYNATIVE_GRAPH_MANAGER_ERR;
  }

  return PYNATIVE_SUCCESS;
}

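// Converts GE output tensors back into MindSpore tensors, using the shapes from the op's
// inferred abstract (tensor or tuple) when available, and a plain conversion otherwise.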
std::vector<MeTensorPtr> ConvertOutputTensors(const OpExecInfoPtr &op_exec_info,
                                              const std::vector<GeTensorPtr> &ge_tensors) {
  std::vector<MeTensorPtr> outputs;
  MS_EXCEPTION_IF_NULL(op_exec_info);
  AbstractBasePtr abs_base = op_exec_info->abstract;
  std::vector<std::vector<int64_t>> shapes;
  if (abs_base != nullptr && abs_base->isa<abstract::AbstractTensor>()) {
    auto arg_tensor = dyn_cast<abstract::AbstractTensor>(abs_base);
    MS_EXCEPTION_IF_NULL(arg_tensor);
    auto shape = arg_tensor->shape();
    MS_EXCEPTION_IF_NULL(shape);
    shapes.emplace_back(shape->shape());
    outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes);
    return outputs;
  }
  if (abs_base != nullptr && abs_base->isa<abstract::AbstractTuple>()) {
    auto arg_tuple = dyn_cast<abstract::AbstractTuple>(abs_base);
    MS_EXCEPTION_IF_NULL(arg_tuple);
    size_t len = arg_tuple->size();

    for (size_t i = 0; i < len; i++) {
      if (arg_tuple->elements()[i]->isa<abstract::AbstractTensor>()) {
        auto tensor = dyn_cast<abstract::AbstractTensor>(arg_tuple->elements()[i]);
        MS_EXCEPTION_IF_NULL(tensor);
        auto shape = tensor->shape();
        MS_EXCEPTION_IF_NULL(shape);
        shapes.emplace_back(shape->shape());
      }
    }
    outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes);
    return outputs;
  }
  for (auto &it : ge_tensors) {
    auto tensor = transform::TransformUtil::ConvertGeTensor(it);
    if (tensor != nullptr) {
      outputs.emplace_back(tensor);
    }
  }
  return outputs;
}

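// Executes a single operator through GE in PyNative mode: converts inputs and attributes,
// builds and registers the single-op graph, runs it with GraphRunner (with the GIL released),
// and returns the outputs as a Python tuple. On failure, *status is set and an empty tuple is returned.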
py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
  MS_LOG(INFO) << "RunOpInGE start";
  MS_EXCEPTION_IF_NULL(op_exec_info);
  MS_EXCEPTION_IF_NULL(status);

  // returns an empty py::tuple on error
  py::tuple err_ret(0);
  auto op_name = op_exec_info->op_name;
  transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Unable to find GE Adapter for " << ((std::string)py::str(op_name));
    *status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR;
    return std::move(err_ret);
  }

  std::vector<GeTensorPtr> inputs{};
  ToTensorPtr(op_exec_info, &inputs);
  // convert me attr to ge AttrValue
  PynativeStatusCode ret = ConvertAttributes(op_exec_info, inputs);
  if (ret != PYNATIVE_SUCCESS) {
    *status = ret;
    return std::move(err_ret);
  }
  // run graph
  transform::RunOptions run_options;
  run_options.name = SINGLE_OP_GRAPH;
  std::vector<GeTensorPtr> ge_inputs;
  std::vector<GeTensorPtr> ge_outputs;
  transform::GraphRunnerOptions graph_runner_options;
  graph_runner_options.options["ge.trainFlag"] = "1";
  auto graph_runner = std::make_shared<transform::GraphRunner>(graph_runner_options);
  transform::Status run_ret;
  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs);
  }
  if (run_ret != transform::Status::SUCCESS) {
    MS_LOG(ERROR) << "GraphRunner fails to run graph";
    *status = PYNATIVE_GRAPH_GE_RUN_ERR;
    return std::move(err_ret);
  }

  std::vector<MeTensorPtr> graph_outputs = ConvertOutputTensors(op_exec_info, ge_outputs);
  size_t output_size = graph_outputs.size();
  py::tuple result(output_size);
  for (size_t i = 0; i < output_size; i++) {
    MS_EXCEPTION_IF_NULL(graph_outputs[i]);
    result[i] = *graph_outputs[i];
  }

  *status = PYNATIVE_SUCCESS;
  MS_LOG(INFO) << "RunOpInGE end";
  return std::move(result);
}
}  // namespace pynative
}  // namespace mindspore