/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define USE_DEPRECATED_API
#include "tools/optimizer/parallel/parallel_pass.h"
#include "include/errorcode.h"
#include "ir/tensor.h"
#include "tools/optimizer/parallel/operator_info_register.h"
#include "ops/fusion/conv2d_fusion.h"
#include "nnacl/op_base.h"
#include "ops/op_utils.h"

namespace mindspore {
namespace opt {

namespace {
// Input 0 of a CNode holds the primitive; the real operands start at input 1.
constexpr auto kAnfPrimitiveIndex = 0;
}  // namespace

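// Returns true when the node's primitive matches an entry in kParallelOpNames
// (taking the depth-wise attribute into account), recording the matched type
// name in type_name_ as a side effect.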
bool ParallelPass::IsParallelCareNode(const AnfNodePtr &node) {
  MS_ASSERT(node != nullptr);
  auto c_node = node->cast<CNodePtr>();
  MS_CHECK_TRUE_RET(c_node != nullptr, false);
  auto prim = GetValueNode<PrimitivePtr>(c_node->input(kAnfPrimitiveIndex));
  MS_CHECK_TRUE_RET(prim != nullptr, false);
  // depth_wise convolutions cannot be split in conv_info; they are handled in depthwise_conv_info.
  is_depth_wise_ = prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise));
  type_name_.clear();
  return std::any_of(kParallelOpNames.begin(), kParallelOpNames.end(), [this, &node](auto &prim_item) {
    if (CheckPrimitiveType(node, prim_item.first.first) && is_depth_wise_ == prim_item.first.second) {
      type_name_ = prim_item.second;
    }
    return !type_name_.empty();
  });
}

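// Rewrites *parallel_name to a key that exists in split_strategys_ and marks
// the cnode as split by appending PARALLEL_NAME_SUFFIX to its name; returns
// false when the node was already split or no strategy is registered for it.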
bool ParallelPass::SetParallelOpName(const AnfNodePtr &node, std::string *parallel_name) {
  MS_ASSERT(node != nullptr && parallel_name != nullptr);
  if (!utils::isa<CNode>(node)) {
    return false;
  }
  auto cnode = node->cast<CNodePtr>();
  std::string cnode_name = cnode->fullname_with_scope();
  if (cnode_name.find(PARALLEL_NAME_SUFFIX) != std::string::npos) {
    MS_LOG(DEBUG) << " : Skip already split cnode " << cnode_name;
    return false;
  }

  // Look up a strategy by the operator's own name first; fall back to its type name.
  if (split_strategys_.find(*parallel_name) == split_strategys_.end()) {
    *parallel_name = type_name_;
  }

  MS_LOG(DEBUG) << " : Reached a parallel care node: " << cnode_name;
  if (split_strategys_.find(*parallel_name) == split_strategys_.end()) {
    MS_LOG(DEBUG) << *parallel_name << " : No split strategy for the current CNode.";
    return false;
  }
  cnode->set_fullname_with_scope(cnode_name + PARALLEL_NAME_SUFFIX);
  return true;
}

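// Builds the OperatorInfo for the cnode by matching its primitive against
// kParallelSchemaId and asking the factory for the registered creator;
// returns nullptr when no creator is registered for the resulting key.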
OperatorInfoPtr ParallelPass::CreateParallelOperator(const CNodePtr &cnode, const std::string &scope_name,
                                                     const std::string &parallel_op_name) {
  MS_ASSERT(cnode != nullptr);
  // Walk the parallel schema table until the entry matching this primitive is found.
  for (const auto &schema_id : kParallelSchemaId) {
    if (!CheckPrimitiveType(cnode, schema_id.first)) {
      continue;
    }
    auto split_schema_id = schema_id.second.first;
    auto split_type_id = schema_id.second.second;
    SplitOpKey op_key = SplitOpKey(split_schema_id, split_type_id, is_depth_wise_);
    auto op_create_func = OperatorInfoFactory::GeInstance()->FindOperatorInfo(op_key);
    if (op_create_func == nullptr) {
      return nullptr;
    }
    OperatorInfoPtr op = op_create_func(scope_name, split_strategys_[parallel_op_name]);
    return op;
  }
  return nullptr;
}

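// Entry point of the pass: for each parallel-care cnode that has a single
// output user and a registered split strategy, create the operator info,
// split the node, and return the replacement node (or the original node
// when the split is skipped).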
AnfNodePtr ParallelPass::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) {
  if (func_graph == nullptr || node == nullptr) {
    return node;
  }
  if (!utils::isa<CNode>(node)) {
    return node;
  }
  if (!IsParallelCareNode(node)) {
    return node;
  }
  // If the current node has more than one output user, do not split it.
  auto manager = func_graph->manager();
  MS_CHECK_TRUE_MSG(manager != nullptr, nullptr, "manager is nullptr.");
  auto iter = manager->node_users().find(node);
  if (iter == manager->node_users().end()) {
    MS_LOG(ERROR) << "node : " << node->fullname_with_scope() << " has no output";
    return nullptr;
  }
  auto output_info_list = iter->second;
  if (output_info_list.size() > kDefaultBatch) {
    return node;
  }
  auto cnode = node->cast<CNodePtr>();
  if (cnode == nullptr) {
    return node;
  }

  std::string parallel_op_name = cnode->fullname_with_scope();
  if (!SetParallelOpName(node, &parallel_op_name)) {
    return node;
  }

  std::string cnode_name = cnode->fullname_with_scope();
  OperatorInfoPtr parallel_operator = CreateParallelOperator(cnode, cnode_name, parallel_op_name);
  if (parallel_operator == nullptr) {
    MS_LOG(ERROR) << "Failure: Create " << parallel_op_name << " OperatorInstance failed";
    return node;
  }
  parallel_operator->Init(func_graph, cnode, fmk_type_);
  if (parallel_operator->DoSplit() == RET_ERROR) {
    MS_LOG(ERROR) << "Failure: operator " << parallel_op_name << " split failed";
    return node;
  }
  return parallel_operator->replace_op();
}
}  // namespace opt
}  // namespace mindspore