// NOTE: code-viewer navigation chrome ("Home / Line# / Scopes / Navigate / Raw / Download")
// from the page this file was scraped from; not part of the original source.
1 /**
2  * Copyright 2019-2024 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_PARALLEL_H_
18 #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_PARALLEL_H_
19 
20 #include <vector>
21 
22 #include <map>
23 #include <memory>
24 #include <set>
25 #include <string>
26 #include <utility>
27 
28 #include "utils/hash_map.h"
29 #include "frontend/optimizer/opt.h"
30 #include "frontend/parallel/strategy.h"
31 #include "frontend/parallel/ops_info/operator_info.h"
32 #include "pipeline/jit/ps/pipeline.h"
33 #include "frontend/parallel/ops_info/ops_utils.h"
34 #include "frontend/parallel/auto_parallel/operator_costmodel.h"
35 #include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
36 
37 using OperatorInfoPtr = std::shared_ptr<mindspore::parallel::OperatorInfo>;
38 
39 namespace mindspore {
40 namespace parallel {
// Microseconds per second, for converting timing measurements.
// `inline constexpr` (C++17): a single definition shared by every TU that
// includes this header, instead of one internal-linkage copy per TU.
inline constexpr uint64_t kUSecondInSecond = 1000000;
// Upper bound on recursion depth for graph traversals (guards against
// runaway/cyclic recursion).  NOTE(review): exact use site is in the .cc —
// confirm before tightening.
inline constexpr int32_t RECURSION_LIMIT = 1000;
43 
44 struct LossNodeInfo {
45   bool has_tuple_getitem = false;
46   int64_t dout_index = 0;  // now don't support the sens is a tuple
47   CNodePtr loss_node = nullptr;
48 };
49 
50 void ForwardCommunication(OperatorVector forward_op, const CNodePtr &node);
51 void ForwardCommunicationForMultiOut(OperatorVector forward_op, const CNodePtr &node);
52 
53 TensorLayout GetTensorInLayout(const AnfNodePtr &pre_node, std::vector<int> get_item_index);
54 TensorLayout GetTensorInLayoutForNewShape(const AnfNodePtr &pre_node, std::vector<int> get_item_index);
55 
56 void MarkForwardCNode(const FuncGraphPtr &root);
57 
58 void SetVirtualDatasetStrategy(const CNodePtr &node);
59 
60 // Create parallel operator for primitive node(has strategy)
61 void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes);
62 
63 // main step of Parallel
64 bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer);
65 
66 std::set<FuncGraphPtr> ForwardGraph(const FuncGraphPtr &root);
67 
68 bool CreateGroupsByCkptFile(const std::string &file);
69 
70 void InsertVirtualOutput(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes);
71 }  // namespace parallel
72 }  // namespace mindspore
73 
74 #endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_PARALLEL_H_
75