/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tools/optimizer/parallel/parallel_pass.h"
#include "include/errorcode.h"
#include "ir/tensor.h"
#include "tools/optimizer/parallel/operator_info_register.h"
#include "ops/fusion/conv2d_fusion.h"
#include "nnacl/op_base.h"

namespace mindspore {
namespace opt {

namespace {
constexpr auto kAnfPrimitiveIndex = 0;
}  // namespace

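// Returns true when |node| is an operator this pass knows how to split.
// As a side effect, caches the matched operator type name in type_name_
// and whether the primitive is depthwise in is_depth_wise_.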
bool ParallelPass::IsParallelCareNode(const AnfNodePtr &node) {
  MS_ASSERT(node != nullptr);
  auto c_node = node->cast<CNodePtr>();
  MS_CHECK_TRUE_RET(c_node != nullptr, false);
  auto prim = GetValueNode<PrimitivePtr>(c_node->input(kAnfPrimitiveIndex));
  MS_CHECK_TRUE_RET(prim != nullptr, false);
  // depthwise conv cannot be split by conv_info; it is handled by depthwise_conv_info instead
  is_depth_wise_ = prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise));
  type_name_.clear();
  return std::any_of(kParallelOpNames.begin(), kParallelOpNames.end(), [this, &node](auto &prim_item) {
    if (CheckPrimitiveType(node, prim_item.first.first) && is_depth_wise_ == prim_item.first.second) {
      type_name_ = prim_item.second;
    }
    return !type_name_.empty();
  });
}

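// Resolves the split-strategy key for |node|: the node's full name is tried
// first, then the cached operator type name. Nodes already carrying
// PARALLEL_NAME_SUFFIX are skipped so a node is never split twice.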
bool ParallelPass::SetParallelOpName(const AnfNodePtr &node, std::string *parallel_name) {
  MS_ASSERT(node != nullptr && parallel_name != nullptr);
  if (!utils::isa<CNode>(node)) {
    return false;
  }
  auto cnode = node->cast<CNodePtr>();
  std::string cnode_name = cnode->fullname_with_scope();
  if (cnode_name.find(PARALLEL_NAME_SUFFIX) != std::string::npos) {
    MS_LOG(DEBUG) << " : Skip already-split cnode " << cnode_name;
    return false;
  }

  // look up the split strategy by operator name first, then fall back to the operator type name.
  if (split_strategys_.find(*parallel_name) == split_strategys_.end()) {
    *parallel_name = type_name_;
  }

  MS_LOG(DEBUG) << " : Reached a parallel care node: " << cnode_name;
  if (split_strategys_.find(*parallel_name) == split_strategys_.end()) {
    MS_LOG(DEBUG) << *parallel_name << " : No split strategy for the current CNode.";
    return false;
  }
  cnode->set_fullname_with_scope(cnode_name + PARALLEL_NAME_SUFFIX);
  return true;
}

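// Builds the SplitOpKey (schema id, type id, depthwise flag) for |cnode|
// from the kParallelSchemaId table and asks the OperatorInfoFactory for a
// registered creator; returns nullptr when the operator is not registered.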
OperatorInfoPtr ParallelPass::CreateParallelOperator(const CNodePtr &cnode, const std::string &scope_name,
                                                     const std::string &parallel_op_name) {
  MS_ASSERT(cnode != nullptr);
  // iterate over the registered (primitive -> (schema id, data type)) entries
  for (const auto &schema_id : kParallelSchemaId) {
    if (!CheckPrimitiveType(cnode, schema_id.first)) {
      continue;
    }
    auto split_key_pair = kParallelSchemaId.find(schema_id.first);
    auto split_schema_id = split_key_pair->second.first;
    auto split_type_id = split_key_pair->second.second;
    SplitOpKey op_key = SplitOpKey(split_schema_id, split_type_id, is_depth_wise_);
    auto op_create_func = OperatorInfoFactory::GeInstance()->FindOperatorInfo(op_key);
    if (op_create_func == nullptr) {
      return nullptr;
    }
    OperatorInfoPtr op = op_create_func(scope_name, split_strategys_[parallel_op_name]);
    return op;
  }
  return nullptr;
}

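// Pass entry point: for a supported single-consumer CNode, creates the
// matching OperatorInfo and splits the node with the configured strategy.
// Returns the replacement node, the original node when nothing is split,
// or nullptr on error.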
AnfNodePtr ParallelPass::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) {
  if (func_graph == nullptr || node == nullptr) {
    return node;
  }
  if (!utils::isa<CNode>(node)) {
    return node;
  }
  if (!IsParallelCareNode(node)) {
    return node;
  }
  // if the current conv2d node has more than one consumer, we do not split it
  auto manager = func_graph->manager();
  MS_CHECK_TRUE_MSG(manager != nullptr, nullptr, "manager is nullptr.");
  auto iter = manager->node_users().find(node);
  if (iter == manager->node_users().end()) {
    MS_LOG(ERROR) << "node : " << node->fullname_with_scope() << " has no output";
    return nullptr;
  }
  auto output_info_list = iter->second;
  if (output_info_list.size() > kDefaultBatch) {
    return node;
  }
  auto cnode = node->cast<CNodePtr>();
  if (cnode == nullptr) {
    return node;
  }

  std::string parallel_op_name = cnode->fullname_with_scope();
  if (!SetParallelOpName(node, &parallel_op_name)) {
    return node;
  }

  std::string cnode_name = cnode->fullname_with_scope();
  OperatorInfoPtr parallel_operator = CreateParallelOperator(cnode, cnode_name, parallel_op_name);
  if (parallel_operator == nullptr) {
    MS_LOG(ERROR) << "Failure: Create " << parallel_op_name << " OperatorInstance failed";
    return node;
  }
  parallel_operator->Init(func_graph, cnode, fmk_type_);
  if (parallel_operator->DoSplit() == RET_ERROR) {
    MS_LOG(ERROR) << "Failure: operator " << parallel_op_name << " split failed";
    return node;
  }
  return parallel_operator->replace_op();
}
}  // namespace opt
}  // namespace mindspore