/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pipeline/jit/pipeline_ge.h"

#include <sstream>
#include <map>
#include <unordered_map>
#include <cstdlib>
#include <algorithm>

#include "debug/anf_ir_dump.h"
#include "ir/tensor.h"
#include "transform/graph_ir/convert.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/graph_builder.h"
#include "transform/graph_ir/graph_runner.h"
#include "debug/draw.h"
#include "abstract/abstract_value.h"
#include "utils/convert_utils_py.h"

namespace mindspore {
namespace pipeline {
using Tensor = mindspore::tensor::Tensor;
using MetaTensor = mindspore::tensor::MetaTensor;
using TensorOrderMap = std::map<std::string, std::shared_ptr<Tensor>>;
using mindspore::abstract::AbstractTensor;
using mindspore::abstract::AbstractTuple;
using mindspore::abstract::AbstractTuplePtr;
using mindspore::transform::DfGraphConvertor;
using mindspore::transform::DfGraphManager;
using mindspore::transform::GeTensorPtr;
using mindspore::transform::MeTensorPtr;
using mindspore::transform::Status;
using mindspore::transform::TransformUtil;

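// Run the graph registered under `phase` with an empty input list (used for the
// dataset graph and for "save" phases).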
void DoExecNonInputGraph(const std::string &phase) {
  std::vector<GeTensorPtr> ge_tensors;
  std::vector<GeTensorPtr> ge_outputs;
  transform::RunOptions run_options;
  run_options.name = phase;
  auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
  if (graph_runner == nullptr) {
    MS_LOG(ERROR) << "Cannot find GraphRunner";
    return;
  }

  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
    if (ret != Status::SUCCESS) {
      MS_LOG(ERROR) << "Exec graph:" << run_options.name << " failed";
      return;
    }
  }
}

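// Forward GE initialization options to the global ConfigManager.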
void SetGeOption(const std::map<std::string, std::string> &options) {
  ConfigManager::GetInstance().set_ge_initialize_options(options);
}

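// Create (or reuse) the global GE session and attach a GraphRunner to it.
// Training sessions get extra options (train flag, stream number, local fmk ops, parallel HCOM).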
Status CreateSessionAndGraphRunner(bool is_training = true) {
  std::shared_ptr<ge::Session> sess = DfGraphManager::GetInstance().GetGeSession();
  if (sess == nullptr) {
    transform::SessionOptions options;
    if (is_training) {
      options["ge.trainFlag"] = "1";
      options["ge.streamNum"] = "100";
      options["ge.enabledLocalFmkop"] = "1";
      options["ge.hcomParallel"] = "1";
    } else {
      options["ge.trainFlag"] = "0";
    }

    options["ge.enablePrintOpPass"] = "0";
    sess = transform::GraphRunner::NewSession(options);
    if (sess == nullptr) {
      MS_LOG(ERROR) << "Init data graph failed, because creating the GE session failed";
      return Status::FAILED;
    } else {
      DfGraphManager::GetInstance().SetGeSession(sess);
    }
  }

  transform::GraphRunnerOptions options;
  options.sess_ptr = sess;
  auto graph_runner = std::make_shared<transform::GraphRunner>(options);
  if (graph_runner == nullptr) {
    MS_LOG(ERROR) << "Create new graph runner failed";
    return Status::FAILED;
  } else {
    DfGraphManager::GetInstance().SetGraphRunner(graph_runner);
  }

  return Status::SUCCESS;
}

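// Build and run the dataset-sink graph: record the dataset parameters in ConfigManager,
// build the GE dataset graph for `phase`, create the session/runner, and execute it.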
bool InitExecDatasetGe(const std::string &queue_name, int64_t size, int64_t batch_size,
                       const std::vector<TypePtr> &types, const std::vector<std::vector<int64_t>> &shapes,
                       const std::vector<int64_t> &input_indexes, const std::string &phase) {
  std::vector<int64_t> ge_types;
  (void)std::transform(types.begin(), types.end(), std::back_inserter(ge_types), [](const TypePtr &i) -> int64_t {
    return transform::TransformUtil::ConvertDataType(i->type_id());
  });

  ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_SINK_MODE);
  ConfigManager::GetInstance().set_iter_num(size);
  ConfigManager::GetInstance().set_dataset_phase(phase);

  DatasetGraphParam param(queue_name, size, batch_size, ge_types, shapes, input_indexes);
  ConfigManager::GetInstance().set_dataset_param(param);

  if (transform::BuildDatasetGraph(param, phase) != transform::SUCCESS) {
    MS_LOG(ERROR) << "Build dataset graph failed.";
    return false;
  }

#if ENABLE_TRAIN
  (void)setenv("GE_TRAIN", "1", 1);
#else
  (void)setenv("GE_TRAIN", "0", 1);
#endif

  if (CreateSessionAndGraphRunner(static_cast<bool>(ENABLE_TRAIN)) != Status::SUCCESS) {
    MS_LOG(ERROR) << "Create GE Session or GraphRunner failed.";
    return false;
  }

  MS_LOG(INFO) << "DoExecNonInputGraph:" << phase;
  DoExecNonInputGraph(phase);

  return true;
}

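// Convert the values of a Python dict (parameter name -> object with a `data` attribute)
// into MindSpore tensors keyed by parameter name.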
void ConvertObjectToTensors(const py::dict &dict, TensorOrderMap *const tensors) {
  for (auto item : dict) {
    if ((!py::isinstance<py::str>(item.first))) {
      MS_LOG(WARNING) << "The key of py_dict is not a string, ignore it.";
      continue;
    }
    std::shared_ptr<Tensor> tensor;
    std::string name = py::cast<std::string>(item.first);
    if (py::isinstance<py::float_>(item.second.attr("data"))) {
      // convert float to tensor with shape([1])
      tensor = std::make_shared<Tensor>(kNumberTypeFloat32, std::vector<int64_t>({1}));
      *(static_cast<float *>(tensor->data_c())) = py::cast<float>(item.second.attr("data"));
    } else if (py::isinstance<py::int_>(item.second.attr("data"))) {
      // convert int to tensor with shape([1])
      tensor = std::make_shared<Tensor>(kNumberTypeInt32, std::vector<int64_t>({1}));
      *(static_cast<int32_t *>(tensor->data_c())) = py::cast<int32_t>(item.second.attr("data"));
    } else if (py::isinstance<Tensor>(item.second.attr("data"))) {
      // cast tensor
      tensor = py::cast<std::shared_ptr<Tensor>>(item.second.attr("data"));
    }

    if (tensor == nullptr) {
      MS_LOG(EXCEPTION) << "Get default value for " << name << " failed";
    }
    (void)tensors->emplace(name, tensor);
  }
}

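// Convert the ANF func graph of `phase` into GE graphs (compute, init, broadcast, checkpoint)
// and register them with the DfGraphManager.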
bool AddDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::dict &init_params,
                const std::string &phase, const py::object &broadcast_params) {
  FuncGraphPtr anf_graph = info.at(phase)->func_graph;
  DfGraphConvertor converter(anf_graph);

  size_t pos = phase.find('.');
  std::string net_id = ((pos == std::string::npos || pos == phase.size() - 1) ? phase : phase.substr(pos + 1));
  std::string phase_prefix = phase.substr(0, pos);
  if (phase_prefix == "export") {
    MS_LOG(INFO) << "Set DfGraphConvertor training : false";
    converter.set_training(false);
  }

  TensorOrderMap init_tensors{};
  ConvertObjectToTensors(init_params, &init_tensors);
  (void)converter.ConvertAllNode().InitParam(init_tensors).BuildGraph();

  if (!broadcast_params.is_none()) {
    if (!py::isinstance<py::dict>(broadcast_params)) {
      MS_LOG(ERROR) << "Invalid broadcast params, it must be a py::dict type";
      return false;
    }
    py::dict broadcast = broadcast_params.cast<py::dict>();
    if (broadcast.empty()) {
      (void)converter.GenerateBroadcastGraph(init_tensors);
    } else {
      TensorOrderMap broadcast_tensors{};
      ConvertObjectToTensors(broadcast, &broadcast_tensors);
      (void)converter.GenerateBroadcastGraph(broadcast_tensors);
    }
    MS_LOG(INFO) << "Generate broadcast graph with params, broadcast_empty is " << broadcast.empty();
  }

  (void)converter.GenerateCheckpointGraph();
  if (converter.ErrCode() != 0) {
    DfGraphManager::GetInstance().ClearGraph();
    MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode();
    return false;
  }
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    converter.DrawComputeGraph(GetSaveGraphsPathName("ge_graph.dot"));                      // for debug
    converter.DrawInitGraph(GetSaveGraphsPathName("init_graph.dot"));                       // for debug
    converter.DrawSaveCheckpointGraph(GetSaveGraphsPathName("save_checkpoint_graph.dot"));  // for debug
  }
#endif
  std::string init_graph = "init_subgraph." + net_id;
  std::string checkpoint_name = "save." + net_id;
  if (phase.find("train") != std::string::npos) {
    (void)DfGraphManager::GetInstance().AddGraph(phase, converter.GetComputeGraph(), {{"ge.exec.variable_acc", "1"}});
  } else {
    (void)DfGraphManager::GetInstance().AddGraph(phase, converter.GetComputeGraph());
  }
  (void)DfGraphManager::GetInstance().AddGraph(init_graph, converter.GetInitGraph());
  (void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, converter.GetBroadcastGraph());

  Status ret = DfGraphManager::GetInstance().AddGraph(checkpoint_name, converter.GetSaveCheckpointGraph());
  if (ret == Status::SUCCESS) {
    DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph);
  }

  return true;
}

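// Compile-time entry point: optionally dump the ANF graph, convert it to GE graphs
// via AddDFGraph, and make sure a GE session and GraphRunner exist.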
FuncGraphPtr BuildDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::dict &init_params,
                          const std::string &phase, const py::object &broadcast_params) {
  if (info.count(phase) == 0) {
    MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase);
  }
  FuncGraphPtr anf_graph = info.at(phase)->func_graph;
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw("anf_graph.dot", anf_graph);  // for debug
    DumpIR("anf_graph.ir", anf_graph, true);
  }
#endif

  if (!AddDFGraph(info, init_params, phase, broadcast_params)) {
    MS_LOG(ERROR) << "GenConvertor failed";
    return nullptr;
  }

#if ENABLE_TRAIN
  (void)setenv("GE_TRAIN", "1", 1);
#else
  (void)setenv("GE_TRAIN", "0", 1);
#endif

  if (CreateSessionAndGraphRunner(static_cast<bool>(ENABLE_TRAIN)) != Status::SUCCESS) {
    MS_LOG(ERROR) << "Create GE Session or GraphRunner failed.";
    return nullptr;
  }

  return anf_graph;
}

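// Run the init graph registered under `phase`, feeding the converted init parameters;
// in distributed mode, also run the broadcast graph.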
void RunGEInitGraph(const py::dict &init_params, const std::string &phase) {
  MS_LOG(DEBUG) << "ExecInitGraph start.";
  TensorOrderMap inputs_with_name{};
  ConvertObjectToTensors(init_params, &inputs_with_name);
  std::vector<tensor::TensorPtr> inputs;
  (void)std::transform(inputs_with_name.begin(), inputs_with_name.end(), std::back_inserter(inputs),
                       [](const std::pair<std::string, tensor::TensorPtr> &item) { return item.second; });

  std::vector<GeTensorPtr> ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
  if (ge_tensors.size() != inputs.size()) {
    MS_LOG(ERROR) << "Failed to convert args to GE tensors.";
    return;
  }
  MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size() << ".";

  std::vector<GeTensorPtr> ge_outputs;
  transform::RunOptions run_options;

  run_options.name = phase;
  if (DfGraphManager::GetInstance().GetGraphByName(phase) == nullptr) {
    MS_LOG(WARNING) << "Cannot find " << phase << " subgraph; the data init subgraph is not needed in INFER mode.";
    return;
  }
  auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
  if (graph_runner == nullptr) {
    MS_LOG(EXCEPTION) << "Cannot find GraphRunner.";
  }
  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
    if (ret != Status::SUCCESS) {
      MS_LOG(EXCEPTION) << "Exec " << phase << " graph failed.";
    }

    MS_LOG(INFO) << "Exec " << phase << " graph success.";

    if ((ConfigManager::GetInstance().parallel_strategy() == ParallelStrategy::DISTRIBUTION) &&
        (DfGraphManager::GetInstance().GetGraphByName(BROADCAST_GRAPH_NAME) != nullptr)) {
      run_options.name = BROADCAST_GRAPH_NAME;
      ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
      if (ret != Status::SUCCESS) {
        MS_LOG(EXCEPTION) << "Exec BROADCAST_GRAPH_NAME failed.";
      }
      MS_LOG(INFO) << "Exec broadcast graph success.";
    }
  }
}

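// Recursively rebuild a CNode's output structure from the flat tuple of GE outputs,
// consuming `*count` entries and checking each tensor's shape against the inferred shape.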
py::object ExtractGeneralCnodeRet(const AbstractBasePtr &cnode_data, const py::tuple &data, size_t *count) {
  MS_EXCEPTION_IF_NULL(cnode_data);

  if (cnode_data->isa<AbstractTensor>()) {
    if (*count >= data.size()) {
      MS_LOG(EXCEPTION) << "The number of elements in the outputs: " << data.size()
                        << " is less than the number of elements required.";
    }

    BaseShapePtr shape = cnode_data->BuildShape();
    if (!shape->isa<abstract::Shape>()) {
      MS_LOG(EXCEPTION) << "The shape of the derived tensor is not a Shape, but " << shape->ToString();
    }
    auto shape_me = shape->cast<abstract::ShapePtr>()->shape();
    auto shape_ge = py::cast<Tensor &>(data[*count]).shape();
    if (shape_ge != shape_me) {
      MS_LOG(EXCEPTION) << "The shape of the " << *count << "th returned tensor: " << shape_ge
                        << " is not the same as the derived shape: " << shape_me;
    }

    return data[(*count)++];
  }

  if (!cnode_data->isa<AbstractTuple>()) {
    MS_LOG(EXCEPTION) << "The output of an operator in the final anf graph can "
                      << "only be a tensor or a tuple of tensors, but got " << cnode_data->BuildValue()->ToString()
                      << ".";
  }
  auto data_tp = cnode_data->cast<AbstractTuplePtr>();
  auto elements = data_tp->elements();
  size_t size = data_tp->size();
  auto tp = py::tuple(size);
  for (size_t i = 0; i < size; i++) {
    tp[i] = ExtractGeneralCnodeRet(elements[i], data, count);
  }
  return std::move(tp);
}

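// Map the graph's output node onto the GE results: constants become Python values,
// parameters and tensors consume entries of `data`, and MakeTuple/Depend nodes are unpacked recursively.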
py::object StructureOutput(const AnfNodePtr &output_node, const py::tuple &data, size_t *count) {
  MS_EXCEPTION_IF_NULL(output_node);

  if (output_node->isa<ValueNode>()) {
    return ValueToPyData(GetValueNode(output_node));
  }

  if (output_node->isa<Parameter>()) {
    if (*count >= data.size()) {
      MS_LOG(EXCEPTION) << "The number of elements in the outputs: " << data.size()
                        << " is less than the number of elements required.";
    }
    return data[(*count)++];
  }

  auto output_c = output_node->cast<CNodePtr>();
  if (output_c == nullptr) {
    MS_LOG(EXCEPTION) << "The final anf graph can only contain constants, parameters, and operators, but got "
                      << output_node->ToString();
  }

  if (output_c->IsApply(prim::kPrimMakeTuple)) {
    auto input_list = output_c->inputs();
    size_t size = input_list.size();
    auto tp = py::tuple(size - 1);
    for (size_t i = 1; i < size; i++) {
      tp[i - 1] = StructureOutput(input_list[i], data, count);
    }
    return std::move(tp);
  }
  if (output_c->IsApply(prim::kPrimDepend)) {
    return StructureOutput(output_c->input(1), data, count);
  }

  return ExtractGeneralCnodeRet(output_c->abstract(), data, count);
}

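// Convert the input tensors to GE format, run the graph registered under `phase`,
// and reassemble the GE outputs into the Python object structure the graph returns.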
std::shared_ptr<py::object> DoExecGraph(const FuncGraphPtr &graph, const std::vector<MeTensorPtr> &inputs,
                                        const std::string &phase) {
  std::vector<GeTensorPtr> ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
  if (ge_tensors.size() != inputs.size()) {
    MS_LOG(EXCEPTION) << "Failed to convert ME args to GE tensors.";
  }

  std::vector<GeTensorPtr> ge_outputs;
  transform::RunOptions run_options;
  run_options.name = phase;
  auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
  if (graph_runner == nullptr) {
    MS_LOG(EXCEPTION) << "Cannot find GraphRunner.";
  }

  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size();
    Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
    MS_LOG(DEBUG) << "Run graph finish, outputs size is: " << ge_outputs.size();
    if (ret != Status::SUCCESS) {
      MS_LOG(ERROR) << "Exec graph failed";
      return nullptr;
    }
  }

  std::vector<MeTensorPtr> me_outputs = TransformUtil::ConvertGeTensors(ge_outputs);
  if (me_outputs.size() != ge_outputs.size()) {
    MS_LOG(WARNING) << "Convert output GE tensors to ME tensors failed";
  }

  py::tuple outputs(me_outputs.size());
  for (std::size_t i = 0; i < outputs.size(); i++) {
    outputs[i] = *me_outputs[i];
  }

  std::shared_ptr<py::object> ret = nullptr;

  AnfNodePtr output_node = graph->get_return()->input(1);
  MS_EXCEPTION_IF_NULL(output_node);
  size_t count = 0;
  py::object oj = StructureOutput(output_node, outputs, &count);
  ret = std::make_shared<py::object>(oj);

  return ret;
}

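// Validate the Python call arguments against the compiled graph's signature and,
// in dataset normal (non-sink) mode, convert them to tensors for graph input.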
void ProcessGeArg(const std::map<std::string, ExecutorInfoPtr> &info, const py::tuple &args, const std::string &phase,
                  std::vector<tensor::TensorPtr> *inputs) {
  // check the arg and use the GraphExecutorPy args
  std::size_t size = args.size();

  if (info.count(phase) == 0) {
    MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase);
  }

  auto arg_size = info.at(phase)->arg_list_size;
  if (size != arg_size) {
    MS_LOG(EXCEPTION) << "The real arg num: " << size << " is not equal to graph_arg_size: " << arg_size;
  }

  // process the first args of tensor
  // only in dataset normal (non-sink) mode does the fp_bp graph need input tensors
  if (ConfigManager::GetInstance().dataset_mode() == DS_NORMAL_MODE) {
    for (std::size_t i = 0; i < size; i++) {
      ValuePtr converted = nullptr;
      bool succ = parse::ConvertData(args[i], &converted);
      if (!succ) {
        MS_LOG(EXCEPTION) << "Convert the " << i << "th arg failed.";
      }
      if (converted->isa<tensor::Tensor>()) {
        inputs->push_back(converted->cast<tensor::TensorPtr>());
      } else {
        MS_EXCEPTION(TypeError) << "The " << i << "th arg: " << converted->ToString() << " is not a tensor.";
      }
    }
  }
}

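// Execute the compiled GE graph of `phase` with the given Python arguments and return
// the result as a Python object; a "save" phase just runs its no-input graph.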
py::object ExecDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::tuple &args,
                       const std::string &phase) {
  std::string phase_prefix = GetPhasePrefix(phase);
  if (phase_prefix == "save") {
    DoExecNonInputGraph(phase);
    ConfigManager::GetInstance().ResetConfig();
    return py::none();
  }

  if (info.count(phase) == 0) {
    MS_LOG(EXCEPTION) << "There is no phase:" << phase;
  }
  FuncGraphPtr anf_graph = info.at(phase)->func_graph;

#ifdef ENABLE_INFER
  // The graph is not used for now because the GE exec function does not take effect.
  MS_EXCEPTION_IF_NULL(info.at(phase)->func_graph);
  if (ENABLE_TRAIN != info.at(phase)->func_graph->has_flag("training")) {
    MS_LOG(ERROR) << "The training mode of the graph does not match the mode of the libraries";
    ConfigManager::GetInstance().ResetConfig();
    return py::none();
  }
#endif

  std::shared_ptr<py::object> ret_val = std::make_shared<py::object>();
  // We will not execute graph when output is constant or just input itself.
  if (IsGraphOutputValueNodeOrParameter(info.at(phase)->func_graph->output(), args, ret_val)) {
    ConfigManager::GetInstance().ResetConfig();
    return *ret_val;
  }

  std::vector<tensor::TensorPtr> inputs;
  ProcessGeArg(info, args, phase, &inputs);

  std::shared_ptr<py::object> ret = DoExecGraph(anf_graph, inputs, phase);
  ConfigManager::GetInstance().ResetConfig();
  if (ret != nullptr) {
    return *ret;
  } else {
    MS_LOG(EXCEPTION) << "Exec graph failed";
  }
}

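// Look up the GE graph registered under `phase` and save it to `file_name` as an AIR model.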
void ExportDFGraph(const std::string &file_name, const std::string &phase) {
  MS_LOG(DEBUG) << "Export graph begin.";
  transform::DfGraphWrapperPtr wrap_ptr = DfGraphManager::GetInstance().GetGraphByName(phase);
  if (wrap_ptr == nullptr) {
    MS_LOG(ERROR) << "Get graph from DfGraphManager failed!";
    return;
  }

  transform::DfGraphPtr ge_graph = wrap_ptr->graph_ptr_;
  if (ge_graph == nullptr) {
    MS_LOG(ERROR) << "Graph is null!";
    return;
  }

  if (ge_graph->SaveToFile(file_name) != 0) {
    MS_LOG(EXCEPTION) << "Export air model failed.";
  }
  MS_LOG(INFO) << "Export air model finish.";
}
}  // namespace pipeline
}  // namespace mindspore