/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_

#include <vector>
#include <memory>
#include <utility>
#include <string>
#include <set>
#include <unordered_set>
#include "ir/func_graph.h"
#include "backend/session/kernel_graph.h"
#include "utils/ms_utils.h"
#include "backend/optimizer/common/pattern_engine.h"

namespace mindspore {
namespace opt {
constexpr size_t kTransOpInputTensorNum = 1;
constexpr size_t kCastInputTensorNum = 1;
constexpr size_t kDependInputTensorNum = 2;
constexpr size_t kReluInputTensorNum = 1;
constexpr size_t kReluGradInputTensorNum = 2;
constexpr size_t kAddInputTensorNum = 2;
constexpr size_t kTupleGetItemInputTensorNum = 2;
constexpr size_t kConvInputTensorNum = 2;
constexpr size_t kRealDivInputTensorNum = 2;
constexpr size_t kSqrtInputTensorNum = 1;
constexpr size_t kMatMulInputTensorNum = 2;
constexpr size_t kMulInputTensorNum = 2;
constexpr size_t kSubInputTensorNum = 2;
constexpr size_t kAssignSubInputTensorNum = 2;
constexpr size_t kDropoutInputTensorNum = 1;
constexpr size_t kAssignInputTensorNum = 2;
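// Note: the *InputTensorNum constants count only an op's data (tensor) inputs; the primitive occupying
// input index 0 of a CNode is excluded. For example, an Add CNode has three inputs in total
// (primitive, x, y), but kAddInputTensorNum is 2.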

constexpr size_t kGradIndex = 3;
constexpr size_t kAddNInputNum = 2;

constexpr size_t kConvBn1OutputNum = 3;
constexpr size_t kBn2ReluOutputNum = 4;

constexpr size_t kBnInputTensorNum = 5;
constexpr size_t kBnOutputNum = 5;

constexpr size_t kBN1OutputNum = 2;
constexpr size_t kBN2OutputNum = 3;
constexpr size_t kBN3OutputNum = 1;

constexpr size_t kBNGradInputTensorNum = 5;
constexpr size_t kBNGradOutputNum = 3;

constexpr size_t kBNGrad1OutputNum = 3;
constexpr size_t kBNGrad2OutputNum = 5;
constexpr size_t kBNGrad3OutputNum = 1;

constexpr size_t kBNTrainingReduceOutputNum = 2;
constexpr size_t kBNTrainingUpdateOutputNum = 5;
constexpr size_t kBNTrainingUpdateV2OutputNum = 3;
constexpr size_t kBNTrainingUpdateV3OutputNum = 5;
constexpr size_t kBNTrainingUpdateGradOutputNum = 2;

constexpr size_t kSingleOutputNum = 1;
constexpr size_t kSumNodeInputTensorNum = 1;
constexpr size_t kSquareNodeInputTensorNum = 1;
constexpr size_t kSquareSumv2OutputNum = 2;
constexpr size_t kMinimumInputTensorNum = 2;

constexpr size_t kLambNextMVWithDecayInputNum = 7;
constexpr size_t kLambNextMVWithDecayConstantMulInputNum = 5;
constexpr size_t kLambNextMVWithDecayOutputNum = 4;
constexpr size_t kLambNextMVWithDecayV1OutputNum = 4;
constexpr size_t kLambNextRightOutputNum = 2;
constexpr size_t kLambUpdateWithLrV2InputNum = 8;
constexpr size_t kLambNextMVRuleInputNum = 14;
constexpr size_t kLambNextMVRuleOutputNum = 4;
constexpr size_t kBackendReshapeInputTensorNum = 1;
constexpr size_t kBackendTransposeInputTensorNum = 1;
constexpr size_t kAdamApplyOneWithDecayOutputNum = 3;
constexpr size_t kLayerNormBetaGammaBackpropInputTensorNum = 4;
constexpr size_t kLayerNormBetaGammaBackpropOutputNum = 2;
constexpr size_t kLayerNormBetaGammaBackpropV2InputTensorNum = 2;
constexpr size_t kLayerNormXBackpropOutputNum = 4;
constexpr size_t kLayerNormXBackpropV2OutputNum = 2;
constexpr size_t kLayerNormGradInputTensorNum = 5;
constexpr size_t kAdamApplyOneOutputNum = 3;
constexpr size_t kApplyMomentumInputTensorNum = 5;
constexpr size_t kBiasAddInputTensorNum = 2;
constexpr size_t kTopkInputTensorNum = 2;
constexpr size_t kLarsV2InputTensorNum = 4;
constexpr size_t kFusedMulApplyMomentumOutputNum = 2;
constexpr size_t kSplitInputTensorNum = 1;
constexpr size_t kGatherV2DynInputTensorNum = 3;
constexpr size_t kUnsortedSegmentSumInputTensorNum = 2;
constexpr size_t kSoftmaxCrossEntropyWithLogitsOutputNum = 2;
constexpr size_t kSparseSoftmaxCrossEntropyWithLogitsInputTensorNum = 2;
constexpr size_t kOneHotOutputNum = 1;
constexpr size_t kOneHotInputTensorNum = 4;

enum FusedBatchNormInput {
  kX = 1,
  kVariance = 5,
};
enum FusedBatchNormOutput {
  kY = 0,
  kRunningMean,
  kRunningVariance,
  kSaveMean,
  kSaveInvVariance,
};
enum ConvBn1Output {
  kData = 0,
  kVarPart,
  kMean,
};
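// Usage sketch (hypothetical nodes): these enums name the positional inputs/outputs of the batch-norm
// ops so passes can index them without magic numbers, e.g. fetching the saved mean output of a BN node:
//   auto save_mean = CreatTupleGetItemNode(func_graph, bn_node, kSaveMean);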

std::vector<int64_t> Convert2Int(const std::vector<size_t> &v);

std::vector<int64_t> Convert2Long(const std::vector<size_t> &v);
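// Both helpers convert an unsigned vector (e.g. a shape) to the signed int64_t form used by tensor
// shapes and attributes, e.g.:
//   std::vector<int64_t> shape = Convert2Long(std::vector<size_t>{2, 3});  // {2, 3}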

// Check whether `node` depends on any of `nodes`.
bool IsDepend(const FuncGraph &graph, const AnfNodePtr &node, const std::vector<AnfNodePtr> &nodes);
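// Usage sketch (hypothetical nodes in a populated graph):
//   std::vector<AnfNodePtr> candidates = {node_a, node_b};
//   bool depends = IsDepend(*func_graph, node_c, candidates);  // true if node_c depends on either one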

bool UnVisited(const BaseRef &n);

bool Visited(const BaseRef &n);

// Check that the input node is a CNode and that it has `input_size` inputs; return the CNodePtr on success.
CNodePtr CheckAnfNodeIfCNodeAndInputSize(const AnfNodePtr &node, size_t input_size);

void CheckCNodeInputSize(const CNodePtr &cnode, size_t input_tensor_num);
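// Usage sketch: passes typically validate a matched node against the expected constants above, e.g.:
//   CheckCNodeInputSize(add_cnode, kAddInputTensorNum);  // add_cnode must carry exactly two data inputs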

bool HasSymmetricalKernelInfo(const AnfNodePtr &node_x, const AnfNodePtr &node_y);

const AnfNodePtr EliminateDependTransop(const FuncGraphPtr &func_graph, const AnfNodePtr &node);

void CreateOutputsOfConvBn1(const FuncGraphPtr &func_graph, const CNodePtr &conv_cnode, const CNodePtr &bn_cnode,
                            std::vector<AnfNodePtr> *conv_bn1_outputs);

void CreateOutputsOfFusedBn2(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &fused_bn1_outputs,
                             const CNodePtr &bn_node, std::vector<AnfNodePtr> *fused_bn2_outputs);
void CreateOutputsOfFusedBn3(const FuncGraphPtr &graph, const AnfNodePtr &data_input,
                             const std::vector<AnfNodePtr> &fused_bn1_outputs,
                             const std::vector<AnfNodePtr> &fused_bn2_outputs, const CNodePtr &bn_node,
                             std::vector<AnfNodePtr> *fused_bn3_outputs);

void CreateMultipleOutputsOfAnfNode(const FuncGraphPtr &kernel_graph, const AnfNodePtr &anf_node_ptr, size_t output_num,
                                    std::vector<AnfNodePtr> *outputs);
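// Usage sketch: expand a multi-output node into one getitem node per output, e.g. for BNTrainingReduce:
//   std::vector<AnfNodePtr> reduce_outputs;
//   CreateMultipleOutputsOfAnfNode(graph, bn_training_reduce, kBNTrainingReduceOutputNum, &reduce_outputs);
//   // reduce_outputs now holds kBNTrainingReduceOutputNum TupleGetItem nodes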

tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr,
                                             size_t data_length);

tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple);

bool IsAllNopNode(const session::KernelGraph *const graph);

bool IsNopNode(const AnfNodePtr &node);

void HideNopNode(session::KernelGraph *const graph);

void RemoveNopNode(session::KernelGraph *const graph);
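// Nop nodes (typically metadata-only ops such as Reshape) need no device computation; passes can hide
// them from the execution order (HideNopNode) or strip them from the graph entirely (RemoveNopNode):
//   HideNopNode(kernel_graph.get());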

CNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx);

ValueNodePtr CreateShapeValueNode(const FuncGraphPtr &func_graph, const std::vector<int64_t> &shape,
                                  bool to_tensor = false);

bool IsUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node);

std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
                                                                             const AnfNodePtr &node);

size_t GetRealNodeNum(const FuncGraphPtr &graph, const AnfNodePtr &node);

std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOutputIdx(const FuncGraphPtr &graph,
                                                                                        const AnfNodePtr &node,
                                                                                        size_t output_index);
bool IsNotRealUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node);
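// Usage sketch: fusion passes often bail out when an intermediate result has other consumers, e.g.:
//   auto users = GetRealNodeUsedList(graph, mul_node);
//   if (users->size() > 1) {
//     return nullptr;  // mul_node feeds more than one real consumer, so skip the fusion
//   }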

void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set<size_t> &input_attrs);
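// Usage sketch: fold constant data inputs of a cnode into attributes of its primitive; the set selects
// which input positions to convert, e.g. converting the input at position 1:
//   ConstInputToAttr(cnode, {1});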

bool AnfEqual(const BaseRef &a, const BaseRef &b);

bool CNodeTypeEqual(const BaseRef &a, const BaseRef &b);

AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars,
                      bool multigraph = false);

// Check whether var_node maps to the same node in both equivs.
bool IsSameNode(const EquivPtr &equiv1, const EquivPtr &equiv2, const VarPtr &var_node);

// Get the anf_node bound to var_node in equiv.
AnfNodePtr GetAnfNodeByVar(const EquivPtr &equiv, const VarPtr &var_node);

// Compare two TupleGetItem nodes by index; returns true if n1's index is less than n2's.
bool CompareTupleGetitem(const AnfNodePtr &n1, const AnfNodePtr &n2);
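// Usage sketch: order a node's TupleGetItem users by their output index:
//   std::sort(getitem_nodes.begin(), getitem_nodes.end(), CompareTupleGetitem);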

// Get a boolean attribute from the cnode.
bool GetBoolAttr(const AnfNodePtr &node, const std::string &attr_name);

// Check whether the node's data type is in the supported data type set.
bool CheckSupportDataType(const AnfNodePtr &node, const std::set<TypeId> &supported_data_type_set);
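// Usage sketch (TypeId enumerators come from MindSpore's type system):
//   const std::set<TypeId> fp_types = {kNumberTypeFloat16, kNumberTypeFloat32};
//   if (!CheckSupportDataType(node, fp_types)) {
//     return nullptr;  // skip the pass for unsupported dtypes
//   }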

// Create a new value node intended for a func graph, not a kernel graph.
ValueNodePtr MakeValueNode(const ValueNodePtr &value_node);

// Transfer Depend or UpdateState users of old_node to new_node.
void TransferDependOrUpdateState(const CNodePtr &old_node, const FuncGraphPtr &graph, const CNodePtr &new_node);

AbstractBasePtr CppInferShape(const PrimitivePtr &prim, const AbstractBasePtrList &args_spec_list);

// Generate kernel build info for a newly created kernel.
kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(const std::vector<AnfNodePtr> &node_list);
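// Usage sketch: derive build info from the nodes a fused kernel replaces, then attach it to the new
// node; AnfAlgo::SetSelectKernelBuildInfo is assumed here from backend/session/anf_runtime_algorithm.h:
//   auto build_info = GenerateKernelBuildInfo({input_node, output_node});
//   AnfAlgo::SetSelectKernelBuildInfo(build_info, fused_node.get());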

// Get the use count of each output of the node.
std::vector<int64_t> GetNodeOutputUsedNum(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);

// Get the total use count across all outputs of the node.
int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_