/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_AUTO_PARALLEL_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_AUTO_PARALLEL_H_

#include <map>
#include <memory>
#include <string>
#include <vector>
#include <utility>
#include "frontend/optimizer/opt.h"
#include "frontend/parallel/status.h"
#include "ir/anf.h"
#include "pipeline/jit/ps/pipeline.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h"

namespace mindspore {
namespace parallel {
// Main step of auto-parallel: build the cost graph and search a sharding strategy for each operator.
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &);

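// Create and initialize the global cost graph that strategy search operates on.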
void InitCostGraph();

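// Create a cost-graph node (OperatorInfo) for each parallelizable operator, keyed by its unique id.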
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &);

Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &);

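// Connect cost-graph nodes with edges that mirror the data dependencies between operators.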
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes);

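// Add auxiliary nodes and edges for tensors (e.g. parameters) consumed by more than one operator.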
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes);

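// Mark operators in the cost graph that should be ignored during strategy search.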
Status IgnoreOperatorsInCostGraph();

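// Search a sharding strategy for every operator in the cost graph and attach the result to the graph.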
Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root);

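// Strategy search based on the recursive-programming algorithm (see rec_core).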
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root, size_t rank_id = 0,
                                 const size_t device_num = 0);

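// Rebuild the recorded input-tensor-name list for the entry referenced by `it` (used by the recursive search path).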
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names);

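// Resolve the CNode whose operator information should be used for `cnode` and its primitive node.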
CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node);

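// Replace the unique id with the operator name in the recorded input tensor names once its OperatorInfo exists.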
void ModifyInputsTensorNameListIfOperatorInfoCreated(const std::string &name, const std::string &uniqueid);

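// Return the index in input_tensor_names of the operator with the given unique id.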
size_t FindOperatorIndexById(const std::string &unique_id,
                             const std::vector<std::vector<std::string>> &input_tensor_names);

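// Record the unique ids of all users of a parameter that is shared by several operators.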
void AddUsersUniqueIdWhenSharingParameter(
  const std::pair<std::string, std::pair<AnfNodePtr, AnfNodeIndexSet>> &parameter_users_info);

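// Group the indices of operators that consume the same input tensor.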
std::vector<std::vector<size_t>> GetIndexOfOpsSharingInputTensor(
  const std::vector<std::vector<std::string>> &param_users_uniqueid_list,
  const std::vector<std::vector<std::string>> &input_tensor_names);

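// Load sharding strategies saved by a previous run and apply them to the corresponding operators.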
Status LoadStrategyFromFile(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes);

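// Save the strategies found by strategy search to file.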
void SaveStrategyToFile();
}  // namespace parallel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_STEP_AUTO_PARALLEL_H_