/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/session/gpu_session.h"

#include <string>
#include <utility>
#include "backend/optimizer/common/helper.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/common/common_backend_optimization.h"
#include "backend/optimizer/gpu/adam_weight_decay_fusion.h"
#include "backend/optimizer/gpu/adam_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/post_batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h"
#include "backend/optimizer/gpu/combine_momentum_fusion.h"
#include "backend/optimizer/gpu/combine_cast_fusion.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/replace_momentum_cast_fusion.h"
#include "backend/optimizer/gpu/replace_addn_fusion.h"
#include "backend/optimizer/gpu/print_reduce_fusion.h"
#include "backend/optimizer/gpu/bce_with_logits_loss_fusion.h"
#include "backend/optimizer/gpu/remove_format_transform_pair.h"
#include "backend/optimizer/gpu/remove_redundant_format_transform.h"
#include "backend/optimizer/gpu/reduce_precision_fusion.h"
#include "backend/optimizer/gpu/insert_cast_gpu.h"
#include "backend/optimizer/gpu/relu_v2_pass.h"
#include "backend/optimizer/gpu/add_relu_v2_fusion.h"
#include "backend/optimizer/gpu/add_relu_grad_v2_fusion.h"
#include "backend/optimizer/gpu/matmul_biasadd_fusion.h"
#if ENABLE_GPU_INFER
#include "backend/optimizer/trt_pass/graph_converter.h"
#endif
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/gpu/concat_outputs_for_all_gather.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "backend/optimizer/pass/optimize_updatestate.h"
#include "common/trans.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#ifdef ENABLE_DEBUGGER
#include "debug/data_dump/e2e_dump.h"
#include "debug/data_dump/dump_json_parser.h"
#include "debug/debugger/proto_exporter.h"
#include "debug/data_dump/dump_utils.h"
#include "debug/tensor_load.h"
#else
#include "debug/debugger/proto_exporter_stub.h"
#endif
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "runtime/device/gpu/kernel_info_setter.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/gpu/cuda_driver.h"
#include "runtime/device/gpu/distribution/collective_init.h"
#include "runtime/device/gpu/gpu_bucket.h"
#include "runtime/device/gpu/gpu_device_address.h"
#include "utils/ms_utils.h"
#include "utils/config_manager.h"
#include "utils/ms_context.h"
#include "utils/context/graph_kernel_flags.h"
#include "utils/utils.h"
#include "abstract/utils.h"
#if ENABLE_CPU && ENABLE_GPU
#include "ps/util.h"
#include "ps/ps_cache/ps_cache_manager.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#endif

namespace mindspore {
namespace session {
namespace gpu {
using AnfAlgo = mindspore::session::AnfRuntimeAlgorithm;
using CollectiveInitializer = device::gpu::CollectiveInitializer;
using GetLocalRankId = device::gpu::GetLocalRankId;
using InitNCCLComm = device::gpu::InitNCCLComm;

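// Initialize the GPU session: in distributed mode, query the local rank id from the collective library and use it
// as the device id, bind the CUDA device, initialize the NCCL communicator, and (unless ENABLE_SECURITY is set)
// copy the dump json configuration before creating the session executor.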
void GPUSession::Init(uint32_t device_id) {
  const void *collective_handle_ = CollectiveInitializer::instance().collective_handle();
  bool collective_inited = CollectiveInitializer::instance().collective_inited();
  if (collective_inited && collective_handle_ != nullptr) {
    auto get_local_rank_funcptr =
      reinterpret_cast<GetLocalRankId>(dlsym(const_cast<void *>(collective_handle_), "local_rank_id"));
    MS_EXCEPTION_IF_NULL(get_local_rank_funcptr);
    device_id = IntToUint((*get_local_rank_funcptr)());
  }
  bool ret = device::gpu::CudaDriver::SetDevice(UintToInt(device_id));
  if (!ret) {
    MS_LOG(EXCEPTION) << "GPUSession failed to set current device id:" << device_id;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);
  if (collective_inited) {
    if (collective_handle_ != nullptr) {
      auto init_nccl_comm_funcptr =
        reinterpret_cast<InitNCCLComm>(dlsym(const_cast<void *>(collective_handle_), "InitNCCLComm"));
      MS_EXCEPTION_IF_NULL(init_nccl_comm_funcptr);
      (*init_nccl_comm_funcptr)();
      rank_id_ = GetRankId();
    }
  }
#ifndef ENABLE_SECURITY
  auto &json_parser = DumpJsonParser::GetInstance();
  // Dump json config file if dump is enabled
  json_parser.CopyDumpJsonToDir(rank_id_);
  json_parser.CopyMSCfgJsonToDir(rank_id_);
#endif
  MS_LOG(INFO) << "Set device id " << device_id << " for gpu session.";
  InitExecutor(kGPUDevice, device_id);
}

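// Select a kernel build info (kernel implementation and data format) for every node in the graph's execution
// order, after checking whether format transformation is supported for this graph.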
void GPUSession::SelectKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::FormatTransformChecker::GetInstance().CheckSupportFormatTransform(kernel_graph);
  for (const auto &kernel_node : kernel_graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel_node);
    device::gpu::SetKernelInfo(kernel_node);
  }
}

void GPUSession::StartKernelRT() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Init()) {
    MS_LOG(EXCEPTION) << "GPU start kernel runtime failed";
  }
}

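// Device-format-independent graph optimizations: TensorRT graph conversion (when GPU inference is enabled),
// MatMul+BiasAdd fusion, Adam/Momentum optimizer fusions, and cast-related fusions and insertions; the execution
// order is refreshed afterwards.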
void GPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
#if ENABLE_GPU_INFER
  pm->AddPass(std::make_shared<opt::GraphConverter>());
#endif
  pm->AddPass(std::make_shared<opt::MatMulBiasAddFusion>());
  pm->AddPass(std::make_shared<opt::AdamWeightDecayFusion>());
  pm->AddPass(std::make_shared<opt::AdamFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayFusion>());
  if (!context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    pm->AddPass(std::make_shared<opt::CastAllFusion>("cast_all"));
  }
  pm->AddPass(std::make_shared<opt::CombineMomentumFusion>("combine_momentum"));
  pm->AddPass(std::make_shared<opt::ReplaceMomentumCastFusion>());
  pm->AddPass(std::make_shared<opt::ReplaceAddNFusion>());
  pm->AddPass(std::make_shared<opt::PrintReduceFusion>("print_reduce"));
  pm->AddPass(std::make_shared<opt::BCEWithLogitsLossFusion>());
  pm->AddPass(std::make_shared<opt::InsertCastGPU>("insert_cast_gpu"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

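// Device-format-dependent graph optimizations: BatchNorm/ReLU fusions, format-transform insertion and cleanup,
// cuDNN in-place aggregation, ReLUV2 fusions, communication-op fusion, and reduced-precision fusion; the
// execution order is refreshed afterwards.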
void GPUSession::HardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BatchNormReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormReluGradFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::PostBatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluGradFusion>());
  pm->AddPass(std::make_shared<opt::InsertFormatTransformOp>());
  pm->AddPass(std::make_shared<opt::RemoveFormatTransformPair>());
  pm->AddPass(std::make_shared<opt::RemoveRedundantFormatTransform>());
  // Remove nodes that are only used by UpdateState, to ensure the correct execution sequence in
  // CudnnInplaceAggregate.
  pm->AddPass(std::make_shared<opt::OptimizeUpdateState>());
  pm->AddPass(std::make_shared<opt::CudnnInplaceAggregate>());
  pm->AddPass(std::make_shared<opt::ReluV2Pass>());
  pm->AddPass(std::make_shared<opt::AddReluV2Fusion>());
  pm->AddPass(std::make_shared<opt::AddReluGradV2Fusion>());
  pm->AddPass(std::make_shared<opt::AllReduceFusion>());
  pm->AddPass(std::make_shared<opt::AllGatherFusion>());
  pm->AddPass(std::make_shared<opt::ConcatOutputsForAllGather>());
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::RunOpOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BCEWithLogitsLossFusion>());
  pm->AddPass(std::make_shared<opt::InsertCastGPU>("insert_cast_gpu"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::RunOpHardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::GraphKernelOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  if (!context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    return;
  }
  opt::GraphKernelOptimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::AssignStream(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::AssignGpuStream(kernel_graph);
}

void GPUSession::BuildKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto kernels = kernel_graph->execution_order();
  device::gpu::CreateGPUKernel(kernels);
}

void GPUSession::AllocateMemory(const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->AssignMemory(*kernel_graph);
}

void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,
                                     const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpAssignMemory(input_tensors, *kernel_graph);
}

void GPUSession::RunOpGenKernelEvent(const KernelGraph *graph) const {
  MS_EXCEPTION_IF_NULL(graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->GenKernelEvents(*graph);
}

void GPUSession::RunOpClearMemory(const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpClearMemory(*kernel_graph);
}

namespace {
constexpr auto kAssignInputSize = 3;
constexpr auto kAssignUpdateIndex = 1;
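// Check whether `node` is consumed by an Assign node as its update target, i.e. the corresponding parameter is
// rewritten in place by the graph.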
bool UpdatedByAssign(const KernelGraphPtr &kernel_graph, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto manager = kernel_graph->manager();
  if (manager == nullptr) {
    return false;
  }
  auto &node_users = manager->node_users();
  auto iter = node_users.find(node);
  if (iter == node_users.end()) {
    return false;
  }
  auto &users = iter->second;
  return std::any_of(users.begin(), users.end(), [](const std::pair<AnfNodePtr, int64_t> &user) {
    MS_EXCEPTION_IF_NULL(user.first);
    auto output_cnode = user.first->cast<CNodePtr>();
    return output_cnode != nullptr && IsPrimitiveCNode(output_cnode, prim::kPrimAssign) &&
           user.second == kAssignUpdateIndex && output_cnode->inputs().size() > kAssignInputSize;
  });
}

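// Compute the byte size of a graph input tensor. For a parameter with a dynamic shape, the parameter's inferred
// shape is refreshed from the actual input tensor first, and the size is recomputed from that shape and dtype.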
size_t UpdateGraphInputAbstract(const AnfNodePtr input_node, const tensor::TensorPtr tensor) {
  MS_EXCEPTION_IF_NULL(input_node);
  MS_EXCEPTION_IF_NULL(tensor);
  size_t size = LongToSize(tensor->data().nbytes());
  if (!input_node->isa<Parameter>()) {
    return size;
  }
  auto input_param = input_node->cast<ParameterPtr>();
  if (input_param != nullptr && input_param->has_dynamic_shape()) {
    auto tensor_shape = tensor->shape();
    std::vector<size_t> shape_tmp;
    (void)std::transform(tensor_shape.begin(), tensor_shape.end(), std::back_inserter(shape_tmp), IntToSize);
    AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(input_node, 0)}, {shape_tmp},
                                        input_node.get());
    size = abstract::ShapeSize(shape_tmp) * abstract::TypeIdSize(tensor->data_type());
  }
  return size;
}
}  // namespace

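// Copy host input tensors into the device addresses bound to the graph's input parameters. Inputs that already
// share a device address with the graph (or that live in the parameter-server cache) are skipped; otherwise the
// data is synchronized host-to-device and the tensor's sync status is cleared.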
void GPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
                               const std::vector<tensor::TensorPtr> &inputs_const) const {
  std::vector<tensor::TensorPtr> inputs(inputs_const);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto &input_nodes = kernel_graph->input_nodes();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (inputs.size() != input_nodes.size()) {
    MS_LOG(EXCEPTION) << "Tensor input:" << inputs.size() << " is not equal graph inputs:" << input_nodes.size();
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto tensor = inputs[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto input_node = input_nodes[i];
    MS_EXCEPTION_IF_NULL(input_node);
    if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
#if ENABLE_CPU && ENABLE_GPU
      const std::string &param_name = input_node->fullname_with_scope();
      if (ps::ps_cache_instance.IsHashTable(param_name)) {
        continue;
      }
#endif
      auto pk_node = input_node->cast<ParameterPtr>();
      auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
      MS_EXCEPTION_IF_NULL(device_address);
      auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
      bool need_sync = false;
      if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
        if (tensor_address == nullptr || tensor_address != device_address) {
          need_sync = true;
        }
      } else if (tensor->NeedSyncHostToDevice() || tensor_address == nullptr) {
        need_sync = true;
      } else if (tensor_address != device_address) {
        if (tensor_address->DeviceType() == device_address->DeviceType()) {
          AnfAlgo::SetOutputAddr(tensor_address, 0, pk_node.get());
        } else {
          need_sync = true;
        }
      }
      if (need_sync) {
        if (AnfAlgo::IsParameterWeight(pk_node) || UpdatedByAssign(kernel_graph, input_node) ||
            ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode) {
          tensor->set_device_address(device_address);
        }
        auto size = UpdateGraphInputAbstract(input_node, tensor);
        if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0), size, tensor->data_type(),
                                              tensor->data_c())) {
          MS_LOG(EXCEPTION) << "SyncHostToDevice failed.";
        }
        if (kernel_graph->IsUpdatedParameter(pk_node)) {
          tensor->SetIsUpdateByDevice();
        }
      }
    }
    tensor->set_sync_status(kNoNeedSync);
  }
}

GraphId GPUSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
  // Construct the kernel graph; on success, graph_sum_ is incremented.
  auto graph = ConstructKernelGraph(lst, outputs);
  MS_EXCEPTION_IF_NULL(graph);
  return CompileGraphImpl(graph);
}

GraphId GPUSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  std::vector<KernelGraphPtr> all_graphs;
  auto root_graph = ConstructKernelGraph(func_graph, &all_graphs);
  MS_EXCEPTION_IF_NULL(root_graph);
  if (all_graphs.size() != 1) {
    MS_LOG(EXCEPTION) << "Gpu backend does not support multi-graph schedule, graph num is " << all_graphs.size();
  }
  // Insert a MakeTuple graph output in case of multiple outputs.
  // The ConvertTupleOutputToMaketuple pass will insert TupleGetItem.
  AnfAlgo::InsertMakeTupleForOutput(NOT_NULL(root_graph));
  opt::BackendCommonOptimization(root_graph);
  return CompileGraphImpl(root_graph);
}

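// Core compilation pipeline for a kernel graph: run the device-independent and device-dependent optimizations,
// select and build kernels, assign CUDA streams, dump intermediate graphs when enabled, set up the graph manager
// and gradient buckets, and allocate memory in graph mode before returning the graph id.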
GraphId GPUSession::CompileGraphImpl(const KernelGraphPtr &graph) {
  MS_EXCEPTION_IF_NULL(graph);
  // Prepare ms context info for dump .pb graph
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
#ifndef ENABLE_SECURITY
  auto &json_parser = DumpJsonParser::GetInstance();
  json_parser.Parse();
#endif
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  // Dump .pb graph before graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
#endif
  // Graph optimization irrelevant to device data format
  Optimize(graph);
  // Select kernel build info
  SelectKernel(graph);
  // Graph optimization relevant to device data format
  HardwareOptimize(graph);
  // Run final optimization
  FinalOptimize(graph);
  // Graph kernel fusion optimization
  GraphKernelOptimize(graph);
  // Start gpu kernel runtime
  StartKernelRT();
#if ENABLE_CPU && ENABLE_GPU
  InitPsWorker(graph);
#endif
  // Assign CUDA streams
  AssignStream(graph);
#ifdef ENABLE_DUMP_IR
  // Dump .pb graph before removing nop nodes
  if (save_graphs) {
    DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
  }
#endif
  // Update Graph Dynamic Shape Attr.
  UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
  graph->UpdateGraphDynamicAttr();
  const bool pynative_mode = context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
  // Hide NopOp from execution graph in graph mode
  if (!pynative_mode) {
    opt::HideNopNode(graph.get());
  }
  // Build kernel if node is cnode
  BuildKernel(graph);
#ifdef ENABLE_DUMP_IR
  std::string name = "graph_build";
  DumpGraphParams dump_params = {true, static_cast<int>(kWholeStack)};
  (void)mindspore::RDR::RecordAnfGraph(SubModuleId::SM_SESSION, name, graph, dump_params, ".ir,.pb");
  auto &kernels = graph->execution_order();
  std::string exec_order_name = "graph_exec_order." + std::to_string(graph->graph_id());
  (void)mindspore::RDR::RecordGraphExecOrder(SubModuleId::SM_SESSION, exec_order_name, kernels);
#endif
#ifndef ENABLE_SECURITY
  // Get summary nodes.
  SetSummaryNodes(graph.get());
#endif
  // Dump .pb graph after graph optimization
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
#endif
#ifndef ENABLE_SECURITY
  if (json_parser.e2e_dump_enabled()) {
    graph->set_root_graph_id(graph->graph_id());
    std::string final_graph = "trace_code_graph_" + std::to_string(graph->graph_id());
    std::string root_dir = json_parser.path() + "/rank_" + std::to_string(rank_id_);
    std::string target_dir = root_dir + "/graphs";
    std::string ir_file_path = target_dir + "/" + "ms_output_" + final_graph + ".ir";
    DumpIRProtoWithSrcInfo(graph, final_graph, target_dir, kDebugWholeStack);
    DumpIR("trace_code_graph", graph, true, kWholeStack, ir_file_path);
    DumpGraphExeOrder("ms_execution_order_graph_" + std::to_string(graph->graph_id()) + ".csv", root_dir,
                      graph->execution_order());
  }
#endif
  // Set graph manager.
  MS_EXCEPTION_IF_NULL(context_);
  FuncGraphManagerPtr manager = MakeManager({graph});
  context_->AddManager(manager);
  if (manager) {
    manager->AddFuncGraph(graph);
    graph->set_manager(manager);
  }

  InitAllBucket(graph);
  // Alloc memory in graph mode, including static memory and dynamic memory
  if (!pynative_mode) {
    AllocateMemory(graph.get());
  }

  DumpGraph(graph);

#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadGraphs(graph);
  }
#endif
  MS_LOG(INFO) << "CompileGraph graph_id: " << graph->graph_id();
  return graph->graph_id();
}

void GPUSession::PreExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                 const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
#ifdef ENABLE_DEBUGGER
  if (debugger_) {
    debugger_->PreExecute(kernel_graph);
  }

  DumpSetup(kernel_graph);
#endif

#if ENABLE_CPU && ENABLE_GPU
  // Initialize parameter server
  InitPSParamAndOptim(kernel_graph, inputs);
#endif
}

void GPUSession::PostExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                  const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  // Summary
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
#ifndef ENABLE_SECURITY
  if (context_ptr->get_param<bool>(MS_CTX_ENABLE_GPU_SUMMARY)) {
    Summary(kernel_graph.get());
  }
#endif
#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadParametersAndConst(kernel_graph);
  }

  // debug used for dump
  if (debugger_ && debugger_->CheckDebuggerDumpEnabled()) {
    Dump(kernel_graph);
  }

  if (debugger_) {
    debugger_->PostExecute();
  }
#endif
}

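// Execute the compiled kernel graph. When the graph contains more than one kernel, it is launched
// gpu_loopsink_size times per call; the parameter-server cache step counter is advanced for GetNext graphs when
// the cache is enabled.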
void GPUSession::ExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph) {
  int kernel_num = kernel_graph->execution_order().size();
  int64_t loopsize = (kernel_num > 1) ? ConfigManager::GetInstance().gpu_loopsink_size() : 1;
  for (int64_t i = 0; i < loopsize; i++) {
#if ENABLE_CPU && ENABLE_GPU
    std::string channel_name;
    if (ps::PsDataPrefetch::GetInstance().cache_enable() && IsGetNextGraph(kernel_graph, &channel_name)) {
      ps::ps_cache_instance.IncreaseGraphStep(channel_name);
    }
#endif
    Execute(kernel_graph);
  }
}

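// Bind the device addresses of graph outputs to the corresponding output tensors (recursing into nested
// VectorRefs), give cnode outputs a fresh device address so the tensor's memory is not overwritten on the next
// run, refresh shapes for dynamic-shape outputs, and sync tensors that must be read back to host immediately.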
void GPUSession::UpdateOutputTensors(const VectorRef *outputs,
                                     const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
                                     std::map<DeviceAddressPtr, DeviceAddressPtr> *new_to_old_device_address) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (const auto &item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      const auto &vector_ref = utils::cast<VectorRef>(item);
      UpdateOutputTensors(&vector_ref, tensor_to_node, new_to_old_device_address);
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      const auto &tensor = utils::cast<tensor::TensorPtr>(item);
      MS_EXCEPTION_IF_NULL(tensor);
      const auto &iter = tensor_to_node.find(tensor);
      if (iter != tensor_to_node.end()) {
        const auto &node = iter->second.first;
        const auto &output_index = iter->second.second;
        MS_EXCEPTION_IF_NULL(node);
        auto address = AnfAlgo::GetMutableOutputAddr(node, output_index);
        // Several outputs may share the same tensor, so skip the update once the tensor has already been bound to a
        // device address.
        if ((address == nullptr) || (address->GetPtr() == nullptr)) {
          // If the device address on the node is invalid, look it up in the new-to-old device address map to check
          // whether the node's address has already been replaced with a new one.
          if ((*new_to_old_device_address).find(address) != (*new_to_old_device_address).end()) {
            address = (*new_to_old_device_address)[address];
          } else {
            continue;
          }
        }
        tensor->set_device_address(address);

        // Once the device address of a graph cnode output is handed to the tensor, the graph output must get a new
        // device address, so that the tensor's device memory is not overwritten in the next step or the next loop.
        // One-shot memory allocation scenarios are skipped because no memory is allocated in the next step:
        // 1. Non cnode 2. Communication kernel.
        bool ps_mode = false;
#if ((defined ENABLE_CPU) && (!defined _WIN32))
        ps_mode = ps::PSContext::instance()->is_ps_mode();
#endif
        if (node->isa<CNode>() && !AnfAlgo::IsCommunicationOp(node) && !ps_mode) {
          auto new_address = std::make_shared<device::gpu::GPUDeviceAddress>(nullptr, address->GetSize());
          AnfAlgo::SetOutputAddr(new_address, output_index, node.get());
          (*new_to_old_device_address)[new_address] = address;
          if (context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
            auto runtime_instance =
              device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
            MS_EXCEPTION_IF_NULL(runtime_instance);
            auto gpu_runtime_instance = dynamic_cast<device::gpu::GPUKernelRuntime *>(runtime_instance);
            gpu_runtime_instance->SetAddrInvalid(address);
          }
        }

        if (AnfAlgo::IsDynamicShape(node)) {
          const auto &updated_shape = AnfAlgo::GetOutputInferShape(node, output_index);
          ShapeVector int_shape;
          std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
          tensor->set_shape(int_shape);
        }
      }
      if (tensor->NeedSyncDeviceToHostImmediately()) {
        tensor->data_sync(false);
        tensor->set_device_address(nullptr);
        tensor->set_sync_status(kNeedSyncHostToDevice);
      }
    }
  }
}

void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Run(*kernel_graph, false)) {
    MS_LOG(EXCEPTION) << "GPU execute graph failed!";
  }
}

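// Build the single-op kernel graph for a PyNative op launch, or return a cached graph when one exists and the op
// is not in the op-cache blacklist; newly built graphs are optimized, kernel-selected, built, and cached when the
// PyNative op graph cache is enabled.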
KernelGraphPtr GPUSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                                       const std::vector<tensor::TensorPtr> &input_tensors,
                                       const std::vector<int64_t> &tensors_mask) {
  // Check if the graph cache exists.
  auto it = run_op_graphs_.find(graph_info);
  if (it != run_op_graphs_.end() && kOpCacheBlackList.find(op_run_info.op_name) == kOpCacheBlackList.end()) {
    return it->second;
  }

  // Prepare the graph
  const auto &kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpOptimize(kernel_graph);
  SelectKernel(kernel_graph);
  RunOpHardwareOptimize(kernel_graph);
  StartKernelRT();
  RunOpHideNopNode(kernel_graph);
  BuildKernel(kernel_graph);
  auto enable_op_graph_cache = MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_OP_GRAPH_CACHE);
  if (enable_op_graph_cache) {
    run_op_graphs_[graph_info] = kernel_graph;
  }
  return kernel_graph;
}

void GPUSession::RunOpImplOrigin(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                                 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                                 const std::vector<int64_t> &tensors_mask) {
  RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask);
}

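// Run a single op in PyNative mode: build or reuse the single-op graph, wait for pending device work on the
// inputs, allocate memory, load inputs, execute, collect outputs (updating the abstract for dynamic-shape ops),
// then release op memory and evict blacklisted graphs from the cache.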
void GPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                           std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                           const std::vector<int64_t> &tensors_mask) {
  MS_EXCEPTION_IF_NULL(input_tensors);
  MS_EXCEPTION_IF_NULL(op_run_info);
  const auto &kernel_graph = BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask);
  EraseValueNodeTensor(tensors_mask, input_tensors);
  // wait for allreduce
  for (auto &tensor : *input_tensors) {
    MS_EXCEPTION_IF_NULL(tensor);
    if (tensor->NeedWaitDevice()) {
      tensor->WaitDevice();
    }
  }
  // run op
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpRemoveNopNode(kernel_graph);
  RunOpAllocateMemory(*input_tensors, kernel_graph.get());
  RunOpGenKernelEvent(kernel_graph.get());
  // Execute the computation
  LoadInputData(kernel_graph, *input_tensors);
  Execute(kernel_graph);
  // Fetch outputs
  std::map<tensor::TensorPtr, session::KernelWithIndex> tensor_to_node;
  UpdateOutputs(kernel_graph, outputs, *input_tensors, &tensor_to_node);
  // update output abstract of dynamic op to op_run_info
  if (op_run_info->is_dynamic_shape) {
    UpdateOutputAbstract(kernel_graph, op_run_info);
  }
  RunOpClearMemory(kernel_graph.get());
  if (kOpCacheBlackList.find(op_run_info->op_name) != kOpCacheBlackList.end()) {
    run_op_graphs_.erase(graph_info);
  }
}

#ifdef ENABLE_DEBUGGER
void GPUSession::DumpSetup(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_LOG(INFO) << "Start!";
  MS_EXCEPTION_IF_NULL(kernel_graph);
  E2eDump::DumpSetup(kernel_graph.get());
  MS_LOG(INFO) << "Finish!";
}

void GPUSession::Dump(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  if (debugger_->DebuggerBackendEnabled()) {
    MS_EXCEPTION_IF_NULL(kernel_graph);
    E2eDump::DumpData(kernel_graph.get(), rank_id_, debugger_.get());
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
}

bool GPUSession::DumpDataEnabledIteration() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  return runtime_instance->DumpDataEnabledIteration();
}
#endif

void GPUSession::SyncStream() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  auto ret = runtime_instance->SyncStream();
  if (!ret) {
    MS_LOG(EXCEPTION) << "Sync stream error!";
  }
}

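// Create a gradient all-reduce bucket for this device, initialized with the runtime's compute stream and
// communication stream.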
std::shared_ptr<device::Bucket> GPUSession::CreateBucket(uint32_t bucket_id, uint32_t bucket_size) {
  auto bucket = std::make_shared<device::gpu::GPUBucket>(bucket_id, bucket_size);

  auto kernel_runtime = device::KernelRuntimeManager::Instance().GetCurrentKernelRuntime();
  MS_EXCEPTION_IF_NULL(kernel_runtime);
  auto compute_stream = kernel_runtime->compute_stream();
  auto communication_stream = kernel_runtime->communication_stream();
  MS_EXCEPTION_IF_NULL(compute_stream);
  MS_EXCEPTION_IF_NULL(communication_stream);

  MS_EXCEPTION_IF_NULL(bucket);
  bucket->Init({compute_stream}, {communication_stream});
  return bucket;
}
}  // namespace gpu
}  // namespace session
}  // namespace mindspore