/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PARAMETER_MANAGER_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PARAMETER_MANAGER_H_

#include <set>
#include <vector>
#include <string>
#include <utility>
#include <memory>
#include <unordered_map>
#include "base/base.h"
#include "frontend/parallel/device_manager.h"
#include "frontend/parallel/step_parallel_utils.h"
#include "frontend/parallel/came_parallel_handler.h"
#include "pipeline/jit/ps/resource.h"
#include "pybind11/pybind11.h"

namespace py = pybind11;

namespace mindspore {
namespace parallel {
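// Attribute keys and Python-side helper locations (in mindspore.parallel._utils)
// used when slicing parameters and initializing optimizer state from C++.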
constexpr char OBJ[] = "obj";
constexpr char CLONED_OBJ[] = "cloned_obj";
constexpr char SLICE_PARAMETER_FN_PATH[] = "mindspore.parallel._utils";
constexpr char SLICE_PARAMETER_FN_NAME[] = "_slice_parameter";
constexpr char INIT_OPTIMIZER_STATE_FN[] = "_init_optimizer_state";
constexpr char SLICE_TENSOR_FN_NAME[] = "_slice_tensor";

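// RefKeyPair pairs a RefKey node with the parameters it refers to.
// ParameterUsersInfo bundles a parameter's name with the parameter node and
// the set of (user node, input index) pairs that consume it.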
using RefKeyPair = std::pair<AnfNodePtr, std::vector<AnfNodePtr>>;
using ParameterUsersInfo = std::pair<std::string, std::pair<AnfNodePtr, AnfNodeIndexSet>>;

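// Collect the users of a parameter among all_nodes (IsCareNode selects which
// CNodes count as users), and resolve a ref parameter to the actual parameter
// node it refers to.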
ParameterUsersInfo FindParameterUsers(const AnfNodePtr &node, bool (*IsCareNode)(const CNodePtr &),
                                      const std::vector<AnfNodePtr> &all_nodes);
AnfNodePtr RefParameterToActualParameter(const AnfNodePtr &node);
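// Graph-level passes of the parallel pipeline: check that shard strategies
// agree across a parameter's users, rewrite SymbolicKey instances, handle
// unused and fully split parameters, propagate sliced shapes to cloned
// (optimizer-state) tensors, and post-process CAME/AdaFactor optimizer states.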
void CheckParameterSplit(const std::vector<AnfNodePtr> &all_nodes);
void HandleSymbolicKeyInstance(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes);
void HandleNoUsedParameter(const FuncGraphPtr &root);
void HandleFullySplitParameters(const FuncGraphPtr &root);
void SetClonedTensorShapeForOptimizer(const FuncGraphPtr &root);
void HandleCameAndAdaFactorOpt(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes,
                               const FuncGraphManagerPtr &manager);
void AutoParallelPostProcess(const FuncGraphPtr &root);
void SliceTensorObj(const ParameterPtr &parameter, const TensorLayoutPtr &tensor_layout, size_t rank_id = 0);
// Initialize the parameters of graphs that are not specified by shard under PyNative mode.
void InitPynativeNoShardParams(const FuncGraphPtr &root);
void InitCompileCacheParams(const pipeline::ResourcePtr &resource);
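// Trace back from a node to the parameter that feeds it; the Allgather
// variant also looks through an AllGather and matches the given parameter
// name (behavior inferred from the signatures).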
std::pair<AnfNodePtr, bool> FindParameter(const AnfNodePtr &node, const FuncGraphPtr &func_graph);
std::pair<AnfNodePtr, bool> FindParameterWithAllgather(const AnfNodePtr &node, const FuncGraphPtr &func_graph,
                                                       const std::string &name);
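// AdaSum support: build the parameter-name -> tensor-layout map, rewrite the
// graph for AdaSum (the bool result presumably reports whether it applied),
// then adjust the mirror operators to match.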
std::unordered_map<std::string, std::shared_ptr<TensorLayout>> AdaSumParamTensorLayout(const FuncGraphPtr &root);
bool HandleAdaSum(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes,
                  std::unordered_map<std::string, std::shared_ptr<TensorLayout>> *adasum_param_tensor_layout_map);
void HandleMirrorInAdaSum(
  const FuncGraphPtr &root,
  std::unordered_map<std::string, std::shared_ptr<TensorLayout>> *adasum_param_tensor_layout_map);
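// Per-parameter queries: whether a parameter is a cloned copy (e.g. optimizer
// state), whether a shard strategy was saved for it, its Python-side object
// (keyed by OBJ / CLONED_OBJ above), whether it is fully split across devices
// (allow_repeat_num bounds the tolerated replication), and layout creation.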
bool ParameterIsCloned(const AnfNodePtr &parameter_node);
bool IsStrategySaved(const AnfNodePtr &parameter_node);
py::object GetPyParameterObj(const ParamInfoPtr &param_info, const std::string &obj);
bool IsFullySplitParameter(const ParameterPtr &param_ptr, size_t allow_repeat_num = 1);
std::shared_ptr<TensorLayout> CreateParameterLayout(const AnfNodePtr &node);
void InsertUniformRealForTaggedNodes(const FuncGraphManagerPtr &manager, const std::vector<AnfNodePtr> &all_nodes);
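// Minimal usage sketch (hypothetical call order; the real driver lives in the
// parallel pipeline, e.g. step_parallel.cc, and may differ):
//
//   FuncGraphPtr root = ...;                  // func graph under compilation
//   std::vector<AnfNodePtr> all_nodes = ...;  // nodes gathered from the graph
//   CheckParameterSplit(all_nodes);           // strategies must be consistent
//   SetClonedTensorShapeForOptimizer(root);   // reshape optimizer states
//   AutoParallelPostProcess(root);            // final parameter processing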
}  // namespace parallel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PARAMETER_MANAGER_H_