Searched refs:ParallelContext (Results 1 – 25 of 34) sorted by relevance

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/
context.cc
39 std::shared_ptr<ParallelContext> ParallelContext::inst_context_ = nullptr;
41 std::shared_ptr<ParallelContext> ParallelContext::GetInstance() { in GetInstance()
43 inst_context_.reset(new (std::nothrow) ParallelContext()); in GetInstance()
48 ParallelContext::ParallelContext() { Reset(); } in ParallelContext() function in mindspore::parallel::ParallelContext
50 void ParallelContext::Reset() { in Reset()
80 void ParallelContext::set_device_num(int64_t device_num) { in set_device_num()
85 void ParallelContext::set_global_rank(int64_t global_rank) { in set_global_rank()
90 void ParallelContext::set_gradients_mean(bool gradients_mean) { gradients_mean_ = gradients_mean; } in set_gradients_mean()
92 void ParallelContext::set_full_batch(bool full_batch) { full_batch_ = full_batch; } in set_full_batch()
94 void ParallelContext::set_dataset_strategy(const std::vector<std::vector<int64_t>> &dataset_strateg… in set_dataset_strategy()
[all …]
context.h
54 class ParallelContext {
56 ~ParallelContext() = default;
57 ParallelContext(const ParallelContext &) = delete;
58 ParallelContext &operator=(const ParallelContext &) = delete;
60 static std::shared_ptr<ParallelContext> GetInstance();
143 ParallelContext();
144 static std::shared_ptr<ParallelContext> inst_context_;
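The context.h hits above describe ParallelContext as a non-copyable singleton whose configuration is set through per-field setters. A minimal usage sketch based only on the signatures visible in this listing (the include path is abbreviated for illustration):

// Sketch only: exercises accessors that appear in the search hits above.
#include "frontend/parallel/context.h"  // abbreviated include path

void ConfigureParallelContext() {
  auto ctx = mindspore::parallel::ParallelContext::GetInstance();  // lazily constructed singleton
  ctx->set_device_num(8);         // total number of devices in the job
  ctx->set_global_rank(0);        // rank of the current process
  ctx->set_gradients_mean(true);  // average mirrored gradients instead of summing
  ctx->set_full_batch(false);     // each rank consumes a shard of the dataset batch
}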
step_parallel.cc
133 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in CreateMirrorInput()
134 int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num(); in CreateMirrorInput()
1011 if (ParallelContext::GetInstance()->enable_parallel_optimizer()) { in FindCNode()
1027 …if (ParallelContext::GetInstance()->enable_parallel_optimizer() && IsInAllGatherNodeList(use_apply… in FindCNode()
1036 if (!ParallelContext::GetInstance()->gradient_fp32_sync()) { in InsertMirrorBeforeCast()
1045 if (ParallelContext::GetInstance()->enable_parallel_optimizer() && IsInAllGatherNodeList(cnode)) { in InsertMirrorBeforeCast()
1107 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in MirrorOpName()
1108 int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num(); in MirrorOpName()
1563 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in ApplyParallelOptOnParam()
1564 int32_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num(); in ApplyParallelOptOnParam()
[all …]
parameter_manager.cc
270 …if (ParallelContext::GetInstance()->pipeline_stage_split_num() == 1 && first_user_group_list != us… in CheckParameterSplit()
339 bool full_batch = ParallelContext::GetInstance()->full_batch(); in HandleNoUsedParameter()
346 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in HandleNoUsedParameter()
419 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in HandleFullySplitParameters()
519 if (ParallelContext::GetInstance()->enable_parallel_optimizer()) { in SetClonedTensorShapeForOptimizer()
step_auto_parallel.cc
64 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in StepAutoParallel()
65 std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode(); in StepAutoParallel()
75 std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode(); in StepAutoParallel()
357 if (ParallelContext::GetInstance()->sharding_propagation() && in CreateTheOperatorInfo()
415 if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) { in ConstructCostGraphNodesByUniqueId()
528 if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) { in ConstructCostGraphNodesByUniqueIdTC()
630 if (ParallelContext::GetInstance()->sharding_propagation() && (prev_prim->name() == CAST) && in CreateEdgeBetweenTwoOps()
969 if (ParallelContext::GetInstance()->sharding_propagation()) { in ParallelStrategySearch()
/third_party/mindspore/mindspore/ccsrc/pipeline/jit/
init.cc
57 using ParallelContext = mindspore::parallel::ParallelContext; typedef
135 (void)py::class_<ParallelContext, std::shared_ptr<ParallelContext>>(m, "AutoParallelContext") in PYBIND11_MODULE()
136 … .def_static("get_instance", &ParallelContext::GetInstance, "Get auto parallel context instance.") in PYBIND11_MODULE()
137 .def("get_device_num", &ParallelContext::device_num, "Get device num.") in PYBIND11_MODULE()
138 .def("set_device_num", &ParallelContext::set_device_num, "Set device num.") in PYBIND11_MODULE()
139 .def("get_device_num_is_set", &ParallelContext::device_num_is_set, "Get device num is set.") in PYBIND11_MODULE()
140 .def("get_global_rank", &ParallelContext::global_rank, "Get global rank.") in PYBIND11_MODULE()
141 .def("set_global_rank", &ParallelContext::set_global_rank, "Set global rank.") in PYBIND11_MODULE()
142 .def("get_global_rank_is_set", &ParallelContext::global_rank_is_set, "Get global rank is set.") in PYBIND11_MODULE()
143 .def("get_gradients_mean", &ParallelContext::gradients_mean, "Get mirror mean.") in PYBIND11_MODULE()
[all …]
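init.cc exports the same singleton to Python as AutoParallelContext through pybind11. Reduced to the bindings visible above, the registration pattern looks roughly like this (the module name auto_parallel is hypothetical; in MindSpore these lines sit inside the larger PYBIND11_MODULE in init.cc):

// Sketch of the binding pattern shown above, not the full MindSpore module.
#include <memory>
#include <pybind11/pybind11.h>
#include "frontend/parallel/context.h"  // abbreviated include path

namespace py = pybind11;
using ParallelContext = mindspore::parallel::ParallelContext;

PYBIND11_MODULE(auto_parallel, m) {  // hypothetical module name
  (void)py::class_<ParallelContext, std::shared_ptr<ParallelContext>>(m, "AutoParallelContext")
      .def_static("get_instance", &ParallelContext::GetInstance, "Get auto parallel context instance.")
      .def("get_device_num", &ParallelContext::device_num, "Get device num.")
      .def("set_device_num", &ParallelContext::set_device_num, "Set device num.")
      .def("get_global_rank", &ParallelContext::global_rank, "Get global rank.")
      .def("set_global_rank", &ParallelContext::set_global_rank, "Set global rank.");
}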
pipeline_split.cc
47 int64_t global_rank = parallel::ParallelContext::GetInstance()->global_rank(); in GetRank()
49 if (!parallel::ParallelContext::GetInstance()->global_rank_is_set()) { in GetRank()
73 auto parallel_mode = parallel::ParallelContext::GetInstance()->parallel_mode(); in PipelineSplit()
78 auto stage_num = parallel::ParallelContext::GetInstance()->pipeline_stage_split_num(); in PipelineSplit()
89 if (!parallel::ParallelContext::GetInstance()->device_num_is_set()) { in PipelineSplit()
96 device_num = parallel::ParallelContext::GetInstance()->device_num(); in PipelineSplit()
action.cc
506 auto context = parallel::ParallelContext::GetInstance(); in AbstractSpecializeAction()
507 MS_EXCEPTION_IF_NULL(parallel::ParallelContext::GetInstance()); in AbstractSpecializeAction()
588 if (parallel::ParallelContext::GetInstance()->parallel_mode() == "semi_auto_parallel" || in OptInlineAction()
589 parallel::ParallelContext::GetInstance()->parallel_mode() == "auto_parallel") { in OptInlineAction()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/
parallel_strategy_checkpoint.cc
32 if (ParallelContext::GetInstance() != nullptr) { in GetInstance()
33 instance.load_file_ = ParallelContext::GetInstance()->strategy_ckpt_load_file(); in GetInstance()
34 … instance.load_checkpoint_on_ = !ParallelContext::GetInstance()->strategy_ckpt_load_file().empty(); in GetInstance()
35 instance.save_file_ = ParallelContext::GetInstance()->strategy_ckpt_save_file(); in GetInstance()
36 … instance.save_checkpoint_on_ = !ParallelContext::GetInstance()->strategy_ckpt_save_file().empty(); in GetInstance()
37 instance.group_info_save_file_ = ParallelContext::GetInstance()->group_ckpt_save_file(); in GetInstance()
38 instance.group_info_save_on_ = !ParallelContext::GetInstance()->group_ckpt_save_file().empty(); in GetInstance()
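parallel_strategy_checkpoint.cc derives its on/off switches from whether the corresponding file paths configured in ParallelContext are empty. A reduced sketch of that convention (the StrategyCheckpointConfig struct and its members are illustrative; only the ParallelContext accessors come from the hits above):

// Illustrative only: mirrors the empty-path convention visible in GetInstance() above.
#include <string>
#include "frontend/parallel/context.h"  // abbreviated include path

struct StrategyCheckpointConfig {  // hypothetical holder for the checkpoint settings
  std::string load_file_;
  bool load_checkpoint_on_ = false;
};

void LoadFromParallelContext(StrategyCheckpointConfig *config) {
  auto ctx = mindspore::parallel::ParallelContext::GetInstance();
  if (ctx != nullptr) {
    config->load_file_ = ctx->strategy_ckpt_load_file();
    // A non-empty path is what enables strategy-checkpoint loading.
    config->load_checkpoint_on_ = !config->load_file_.empty();
  }
}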
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/allreduce_fusion/
step_allreduce_fusion.cc
32 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in StepAllreduceFusion()
33 std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode(); in StepAllreduceFusion()
34 bool enable_all_reduce_fusion = ParallelContext::GetInstance()->enable_all_reduce_fusion(); in StepAllreduceFusion()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ops_info/
virtual_dataset_info.cc
154 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in GenerateStrategies()
157 if (!ParallelContext::GetInstance()->dataset_strategy().empty()) { in GenerateStrategies()
158 strategy = ParallelContext::GetInstance()->dataset_strategy(); in GenerateStrategies()
160 bool full_batch = ParallelContext::GetInstance()->full_batch(); in GenerateStrategies()
get_next_info.cc
121 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in CheckStrategy()
122 if (!ParallelContext::GetInstance()->dataset_strategy().empty()) { in CheckStrategy()
123 dataset_strategy_ = ParallelContext::GetInstance()->dataset_strategy(); in CheckStrategy()
125 bool full_batch = ParallelContext::GetInstance()->full_batch(); in CheckStrategy()
operator_info.cc
377 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in AddCommOpMeanFlag()
378 bool mean_flag = ParallelContext::GetInstance()->gradients_mean(); in AddCommOpMeanFlag()
415 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in CreateMiniStepAllGatherOp()
416 bool mean_flag = ParallelContext::GetInstance()->gradients_mean(); in CreateMiniStepAllGatherOp()
439 bool mean_flag = ParallelContext::GetInstance()->gradients_mean(); in CreateMicroStepAllGatherOp()
483 bool mean_flag = ParallelContext::GetInstance()->gradients_mean(); in CreateMirrorOps()
484 int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step(); in CreateMirrorOps()
485 int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num(); in CreateMirrorOps()
565 …int64_t optimizer_weight_shard_size = ParallelContext::GetInstance()->optimizer_weight_shard_size(… in CreateGroupForOptShard()
607 auto integrated_save = ParallelContext::GetInstance()->optimizer_weight_shard_aggregated_save(); in CreateGroupForOptShard()
[all …]
virtual_output_info.cc
58 bool full_batch = ParallelContext::GetInstance()->full_batch(); in GenerateStrategies()
reluv2_info.cc
101 if (!ParallelContext::GetInstance()->loss_repeated_mean()) { in InferAsLossDivisor()
split_info.cc
177 if (!ParallelContext::GetInstance()->loss_repeated_mean()) { in InferAsLossDivisor()
unique_info.cc
41 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in InferTensorMap()
/third_party/mindspore/mindspore/ccsrc/utils/
comm_manager.cc
226 auto parallel_context = parallel::ParallelContext::GetInstance(); in GetRank()
250 auto parallel_context = parallel::ParallelContext::GetInstance(); in IsStandAlone()
/third_party/mindspore/mindspore/ccsrc/runtime/device/
launch_mul.cc
61 auto parallel_context = parallel::ParallelContext::GetInstance(); in ObtainMulInputsAddr()
bucket.cc
87 auto parallel_context = parallel::ParallelContext::GetInstance(); in CalculateMean()
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/hccl/
hccl_kernel_metadata.cc
37 auto parallel_context_instance = parallel::ParallelContext::GetInstance(); in GetKernelFormat()
/third_party/mindspore/mindspore/ccsrc/frontend/optimizer/irpass/
inline.h
52 auto stage_num = parallel::ParallelContext::GetInstance()->pipeline_stage_split_num(); in operator()
119 auto stage_num = parallel::ParallelContext::GetInstance()->pipeline_stage_split_num(); in operator()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/tensor_layout/
redistribution_operator_infer.cc
137 if (ParallelContext::GetInstance()->enable_all2all()) { in InferPermuteByAxis()
tensor_layout.cc
436 …int64_t optimizer_weight_shard_size = ParallelContext::GetInstance()->optimizer_weight_shard_size(… in GenerateOptShardSliceShape()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/graph_util/
pipeline_split_utils.cc
117 MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); in InsertVirtualAssignAdd()
118 bool enable_parallel_optimizer = ParallelContext::GetInstance()->enable_parallel_optimizer(); in InsertVirtualAssignAdd()
