/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "backend/kernel_compiler/tbe/tbe_adapter.h"

#include <map>
#include <unordered_set>
#include <string>
#include <memory>
#include <vector>
#include <set>
#include <algorithm>
#include <unordered_map>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/oplib/opinfo.h"
#include "frontend/parallel/ops_info/ops_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
#include "utils/json_operation_utils.h"
#include "utils/ms_context.h"

namespace mindspore {
namespace kernel {
namespace tbe {
namespace {
constexpr int kInvalid = -1;
constexpr int kFloat = 0;
constexpr int kFloat16 = 1;
constexpr int kInt8 = 2;
constexpr int kInt32 = 3;
constexpr int kUint8 = 4;
constexpr int kUint64 = 10;
constexpr int kBool = 12;
constexpr size_t kC0 = 16;
constexpr size_t kShapeIndex0 = 0;
constexpr size_t kShapeIndex1 = 1;
constexpr size_t kShapeIndex2 = 2;
constexpr size_t kShapeIndex3 = 3;
constexpr size_t kShapeIndex4 = 4;
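// Maps a MindSpore type string (e.g. "Float16", "Int32") to the integer
// dst_type code expected by the TBE Cast kernel; returns kInvalid when the
// type is not supported.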
int TypeStrToDstType(const std::string &type_str) {
  std::unordered_map<std::string, int> type_name_type_id_map = {
    {"Float", kFloat}, {"Float32", kFloat}, {"Float16", kFloat16}, {"Int8", kInt8},
    {"Int32", kInt32}, {"UInt8", kUint8},   {"UInt64", kUint64},   {"Bool", kBool}};
  auto iter = type_name_type_id_map.find(type_str);
  if (iter != type_name_type_id_map.end()) {
    return iter->second;
  } else {
    MS_LOG(INFO) << "Error: type string is invalid: " << type_str;
  }
  return kInvalid;
}
}  // namespace
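// Ops whose TBE kernel consumes its inputs in a different order than the
// corresponding MindSpore IR node; InputOrderPass and its fusion variants
// reorder the inputs of these ops.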
std::unordered_set<std::string> TbeAdapter::input_order_adjusted_ops_ = {kConv2DBackpropInputOpName,
                                                                         kConv2DBackpropFilterOpName,
                                                                         kLogSoftmaxGradOpName,
                                                                         kLayerNormGradOpName,
                                                                         kLayerNormXBackpropOpName,
                                                                         kLayerNormXBackpropV2OpName,
                                                                         kLayerNormBetaGammaBackpropOpName,
                                                                         kMinimumGradOpName,
                                                                         kMaximumGradOpName,
                                                                         kApplyCenteredRMSPropOpName};

std::map<std::string, FAttrsPass> TbeAdapter::build_json_attr_pass_map_ = {{"Cast", TbeAdapter::CastAttrJsonPass}};

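// For dynamic-shape Conv2DBackpropInput/Conv2DBackpropFilter the TBE kernel
// expects a different input order than the IR node provides; reorder the
// inputs accordingly and return true, otherwise return false.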
bool TbeAdapter::DynamicInputAdjusted(const std::shared_ptr<AnfNode> &anf_node,
                                      std::vector<std::vector<nlohmann::json>> const &inputs_list,
                                      nlohmann::json *inputs_json) {
  if (!AnfAlgo::IsNodeDynamicShape(anf_node) && !AnfAlgo::IsDynamicShape(anf_node)) {
    return false;
  }
  auto op_name = AnfAlgo::GetCNodeName(anf_node);
  if (op_name == kConv2DBackpropInputOpName) {
    // process dynamic Conv2DBackpropInput, tbe kernel input is x, input_size and dout
    inputs_json->push_back(inputs_list[kIndex2]);
    inputs_json->push_back(inputs_list[kIndex1]);
    inputs_json->push_back(inputs_list[kIndex0]);
    return true;
  }
  if (op_name == kConv2DBackpropFilterOpName) {
    // process dynamic Conv2DBackpropFilter, tbe kernel input is filter_size, x and dout
    inputs_json->push_back(inputs_list[kIndex1]);
    inputs_json->push_back(inputs_list[kIndex2]);
    inputs_json->push_back(inputs_list[kIndex0]);
    return true;
  }
  return false;
}

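// Append the node's input descriptions to inputs_json, applying the special
// input orders required by the ops in input_order_adjusted_ops_ (e.g.
// MinimumGrad/MaximumGrad and ApplyCenteredRMSProp).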
void TbeAdapter::InputOrderPass(const std::shared_ptr<AnfNode> &anf_node,
                                std::vector<std::vector<nlohmann::json>> const &inputs_list,
                                nlohmann::json *inputs_json) {
  MS_EXCEPTION_IF_NULL(inputs_json);
  if (DynamicInputAdjusted(anf_node, inputs_list, inputs_json)) {
    return;
  }
  auto op_name = AnfAlgo::GetCNodeName(anf_node);
  if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
    (void)std::copy(inputs_list.begin(), inputs_list.end(), std::back_inserter((*inputs_json)));
  } else {
    if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
      inputs_json->push_back(inputs_list[kIndex2]);
      inputs_json->push_back(inputs_list[kIndex0]);
      inputs_json->push_back(inputs_list[kIndex1]);
      for (size_t i = 3; i < inputs_list.size(); ++i) {
        inputs_json->push_back(inputs_list[i]);
      }
    } else if (op_name == "ApplyCenteredRMSProp") {
      // The parameter order of ApplyCenteredRMSProp's TBE implementation differs from the Python API,
      // so map each TBE parameter to its Python API counterpart by the latter's index, hard-coded below.
      inputs_json->push_back(inputs_list[kIndex0]);
      inputs_json->push_back(inputs_list[kIndex1]);
      inputs_json->push_back(inputs_list[kIndex2]);
      inputs_json->push_back(inputs_list[kIndex3]);
      inputs_json->push_back(inputs_list[kIndex5]);
      inputs_json->push_back(inputs_list[kIndex6]);
      inputs_json->push_back(inputs_list[kIndex7]);
      inputs_json->push_back(inputs_list[kIndex8]);
      inputs_json->push_back(inputs_list[kIndex4]);
    } else {
      inputs_json->push_back(inputs_list[kIndex1]);
      inputs_json->push_back(inputs_list[kIndex0]);
      for (size_t i = 2; i < inputs_list.size(); ++i) {
        inputs_json->push_back(inputs_list[i]);
      }
    }
  }
}

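// Fusion-scope variant of InputOrderPass: append each input json in the order
// the fused TBE kernel expects.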
void TbeAdapter::FusionInputOrderPass(const std::shared_ptr<AnfNode> &anf_node,
                                      const std::vector<nlohmann::json> &inputs_list,
                                      std::vector<nlohmann::json> *inputs_json) {
  MS_EXCEPTION_IF_NULL(inputs_json);
  if (DynamicInputAdjusted(anf_node, inputs_list, inputs_json)) {
    return;
  }
  auto op_name = AnfAlgo::GetCNodeName(anf_node);
  if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
    (void)std::copy(inputs_list.begin(), inputs_list.end(), std::back_inserter((*inputs_json)));
  } else {
    if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
      inputs_json->emplace_back(inputs_list[2]);
      inputs_json->emplace_back(inputs_list[0]);
      inputs_json->emplace_back(inputs_list[1]);
      for (size_t i = 3; i < inputs_list.size(); ++i) {
        inputs_json->emplace_back(inputs_list[i]);
      }
    } else {
      inputs_json->emplace_back(inputs_list[1]);
      inputs_json->emplace_back(inputs_list[0]);
      for (size_t i = 2; i < inputs_list.size(); ++i) {
        inputs_json->emplace_back(inputs_list[i]);
      }
    }
  }
}

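// Reorder a fusion data layer (the data nodes feeding a fused kernel) to match
// the adjusted input order of the given op.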
void TbeAdapter::FusionDataOrderPass(const std::string &op_name, const std::vector<AnfNodePtr> &data_layer,
                                     std::vector<AnfNodePtr> *reorder_data_layer) {
  MS_EXCEPTION_IF_NULL(reorder_data_layer);
  if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
    (void)std::copy(data_layer.begin(), data_layer.end(), std::back_inserter((*reorder_data_layer)));
  } else {
    if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
      (void)reorder_data_layer->emplace_back(data_layer[kIndex2]);
      (void)reorder_data_layer->emplace_back(data_layer[kIndex0]);
      (void)reorder_data_layer->emplace_back(data_layer[kIndex1]);
      for (size_t i = 3; i < data_layer.size(); ++i) {
        (void)reorder_data_layer->emplace_back(data_layer[i]);
      }
    } else {
      (void)reorder_data_layer->emplace_back(data_layer[kIndex1]);
      (void)reorder_data_layer->emplace_back(data_layer[kIndex0]);
      for (size_t i = 2; i < data_layer.size(); ++i) {
        (void)reorder_data_layer->emplace_back(data_layer[i]);
      }
    }
  }
}

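// Run the op-specific attr pass registered in build_json_attr_pass_map_ for
// this node, if any; returns true when a pass handled the attrs.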
bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node, const std::vector<OpAttrPtr> &op_info_attrs,
                             nlohmann::json *attrs_json) {
  MS_EXCEPTION_IF_NULL(attrs_json);
  auto cnode_name = AnfAlgo::GetCNodeName(anf_node);
  auto FPass = build_json_attr_pass_map_.find(cnode_name);
  if (FPass != build_json_attr_pass_map_.end()) {
    FPass->second(anf_node, op_info_attrs, attrs_json);
    return true;
  }
  return false;
}

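// Emit json objects for MinimumGrad/MaximumGrad attrs, marking each attr valid
// only when the primitive actually carries it.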
void TbeAdapter::MaxiOrMinimumGradAttrJsonPass(const AnfNodePtr &anf_node,
                                               const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
                                               nlohmann::json *attrs_json) {
  MS_EXCEPTION_IF_NULL(anf_node);
  MS_EXCEPTION_IF_NULL(attrs_json);
  auto attr_num = op_info_attrs.size();
  auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
  MS_EXCEPTION_IF_NULL(primitive);
  for (size_t i = 0; i < attr_num; i++) {
    nlohmann::json attr_obj;
    MS_EXCEPTION_IF_NULL(op_info_attrs[i]);
    std::string attr_name = op_info_attrs[i]->name();
    auto value = primitive->GetAttr(attr_name);
    if (value != nullptr) {
      bool attr_value = GetValue<bool>(value);
      attr_obj["value"] = attr_value;
      attr_obj["valid"] = true;
    } else {
      attr_obj["valid"] = false;
    }
    attr_obj["name"] = attr_name;
    attrs_json->push_back(attr_obj);
  }
  MS_LOG(INFO) << "MaxiOrMinimumGradAttrJsonPass done.";
}

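// Build the dst_type attr for a Cast node from the node's output device dtype.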
void TbeAdapter::CastAttrJsonPass(const mindspore::AnfNodePtr &anf_node,
                                  const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
                                  nlohmann::json *attrs_json) {
  MS_EXCEPTION_IF_NULL(anf_node);
  MS_EXCEPTION_IF_NULL(attrs_json);
  if (op_info_attrs.size() != 1) {
    MS_LOG(INFO) << "Cast node should have exactly one attr: dst_type";
    return;
  }
  auto attr_name = op_info_attrs[0]->name();
  auto type_ptr = std::make_shared<TensorType>(TypeIdToType(AnfAlgo::GetOutputDeviceDataType(anf_node, 0)));
  MS_EXCEPTION_IF_NULL(type_ptr);
  auto type_element = type_ptr->element();
  MS_EXCEPTION_IF_NULL(type_element);
  auto dtype = type_element->ToString();
  auto dst_type_value = TypeStrToDstType(dtype);
  nlohmann::json attr_obj;
  attr_obj["value"] = dst_type_value;
  attr_obj["valid"] = true;
  attr_obj["name"] = attr_name;
  attrs_json->push_back(attr_obj);
}

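// Build the json description for TopKV2's indices input: a 1-D float16 tensor
// whose length equals the last dimension of the node's first output shape.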
void TbeAdapter::GenTopKV2IndicesTensorInfo(const std::shared_ptr<mindspore::AnfNode> &anf_node,
                                            size_t real_input_index, std::vector<nlohmann::json> *input_list,
                                            mindspore::kernel::kCreaterType creater_type) {
  MS_EXCEPTION_IF_NULL(anf_node);
  MS_EXCEPTION_IF_NULL(input_list);
  auto input_x_shape = AnfAlgo::GetOutputInferShape(anf_node, 0);
  size_t last_dim = input_x_shape[input_x_shape.size() - 1];
  std::vector<size_t> tensor_shape = {last_dim};
  std::vector<size_t> tensor_origin_shape = {last_dim};
  std::string tensor_format = AnfAlgo::GetInputFormat(anf_node, static_cast<const size_t &>(real_input_index));
  if (tensor_format == kOpFormat_DEFAULT) {
    tensor_format = kOpFormat_NCHW;
  }
  std::string tensor_origin_format = kOpFormat_NCHW;
  std::string tensor_dtype = "float16";
  nlohmann::json input_desc_json;
  input_desc_json["dtype"] = tensor_dtype;
  input_desc_json["name"] = AnfAlgo::GetCNodeName(anf_node);
  input_desc_json["ori_shape"] = tensor_origin_shape;
  input_desc_json["ori_format"] = tensor_origin_format;
  input_desc_json["shape"] = tensor_shape;
  if (creater_type == OP_SELECT_FORMAT) {
    input_desc_json["format"] = tensor_origin_format;
  } else {
    input_desc_json["format"] = tensor_format;
  }
  input_desc_json["valid"] = true;
  input_list->emplace_back(input_desc_json);
}

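// A fusion scope is "special" when it contains a Conv2D or Conv2DBackpropInput
// compute node.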
bool TbeAdapter::IsSpecialFusionComputeNode(const std::vector<mindspore::AnfNodePtr> &compute_nodes) {
  auto result = std::find_if(compute_nodes.begin(), compute_nodes.end(), [](const auto &it) {
    auto op_name = AnfAlgo::GetCNodeName(it);
    return (op_name == kConv2DBackpropInputOpName || op_name == kConv2DOpName);
  });
  return result != compute_nodes.end();
}

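// Record which data inputs of ReluGradV2/AddN/TensorAdd need special fusion
// handling (see FusionDescJsonPass); fails when the op has no inputs.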
bool TbeAdapter::GetSpecInputLayers(const std::string &op_name, const std::vector<mindspore::AnfNodePtr> &reorder_layer,
                                    std::map<const AnfNodePtr, FusionDataType> *spec_data_input) {
  if ((op_name == kReluGradV2OpName || op_name == kAddNOpName || op_name == kTensorAddOpName) &&
      reorder_layer.empty()) {
    MS_LOG(WARNING) << "Fusion error: node (" << op_name << ")'s input is null.";
    return false;
  }
  if (op_name == kReluGradV2OpName) {
    (*spec_data_input)[reorder_layer[0]] = kFusionReLUGradV2;
  } else if (op_name == kAddNOpName) {
    for (const auto &it : reorder_layer) {
      (*spec_data_input)[it] = kFusionAddN;
    }
  } else if (op_name == kTensorAddOpName) {
    (*spec_data_input)[reorder_layer[0]] = kFusionAdd;
  }
  return true;
}

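// Rewrite a 5-D output desc for special fusion inputs: fold the H and W dims
// into one and, for ReluGradV2, also fix the last dim to C0 and the dtype to bool.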
void TbeAdapter::FusionDescJsonPass(const AnfNodePtr &node, nlohmann::json *output_desc,
                                    const std::map<const AnfNodePtr, tbe::FusionDataType> &spec_data_input) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(output_desc);
  tbe::FusionDataType fusion_data_type =
    spec_data_input.find(node) != spec_data_input.end() ? spec_data_input.at(node) : tbe::kFusionNormal;
  std::vector<size_t> shape = (*output_desc)["shape"];
  if ((fusion_data_type == kFusionAddN || fusion_data_type == kFusionAdd) && shape.size() == kShape5dDims) {
    std::vector<size_t> spec_shape = {};
    spec_shape.emplace_back(shape[kShapeIndex0]);
    spec_shape.emplace_back(shape[kShapeIndex1]);
    spec_shape.emplace_back(shape[kShapeIndex2] * shape[kShapeIndex3]);
    spec_shape.emplace_back(shape[kShapeIndex4]);
    (*output_desc)["shape"] = spec_shape;
  } else if (fusion_data_type == kFusionReLUGradV2) {
    std::vector<size_t> spec_shape = {};
    spec_shape.emplace_back(shape[kShapeIndex0]);
    spec_shape.emplace_back(shape[kShapeIndex1]);
    spec_shape.emplace_back(shape[kShapeIndex2] * shape[kShapeIndex3]);
    spec_shape.emplace_back(kC0);
    (*output_desc)["shape"] = spec_shape;
    (*output_desc)["data_type"] = "bool";
  }
}

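// Map an IR op type to the op type used by the TBE buffer-fusion interface.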
std::string TbeAdapter::GetRealOpType(const std::string &origin_type) {
  static std::map<std::string, std::string> buffer_fusion_op_map = {
    {parallel::DEPTHWISE_CONV2D_NATIVE, parallel::DEPTHWISE_CONV2D}};
  auto iter = buffer_fusion_op_map.find(origin_type);
  return (iter != buffer_fusion_op_map.end()) ? iter->second : origin_type;
}

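// Look up the fusion type for a node, first in the hard-coded table below and
// otherwise from the op's registered TBE op info.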
std::string TbeAdapter::GetNodeFusionType(const mindspore::CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  auto node_type = AnfAlgo::GetCNodeName(cnode);
  static std::map<std::string, std::string> fusion_type_map = {{kConv2DOpName, "Convolution"},
                                                               {kBNTrainingReduceOpName, "bn_reduce"},
                                                               {kBNTrainingUpdateOpName, "bn_update"},
                                                               {kReluV2OpName, "ElemWise"},
                                                               {kTensorAddOpName, "ElemWise"},
                                                               {kConv2DBackpropInputOpName, "Conv2d_backprop_input"},
                                                               {kConv2DBackpropFilterOpName, "Conv2d_backprop_filter"},
                                                               {kDepthwiseConv2dNativeOpName, "DepthwiseConvolution"},
                                                               {kAddNOpName, "ElemWise"},
                                                               {kReluGradV2OpName, "ElemWise"},
                                                               {kRealDivOpName, "ElemWise"},
                                                               {kBiasAddOpName, "BiasAdd"}};
  auto find = fusion_type_map.find(node_type);
  if (find == fusion_type_map.end()) {
    MS_LOG(INFO) << "Fusion warning: failed to get node fusion type from the static map, origin node type: "
                 << node_type;
    auto op_info = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(node_type, cnode);
    MS_EXCEPTION_IF_NULL(op_info);
    return op_info->fusion_type();
  } else {
    return find->second;
  }
}

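// Normalize a format string for TBE: expand the default format to NCHW or ND
// depending on rank (always NCHW in PyNative mode) and map FRAC_Z to FRACTAL_Z.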
std::string TbeAdapter::FormatPass(const std::string &format, const size_t &origin_shape_size) {
  if (format == kOpFormat_DEFAULT) {
    auto ms_context = MsContext::GetInstance();
    MS_EXCEPTION_IF_NULL(ms_context);
    if (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode) {
      return kOpFormat_NCHW;
    }
    return origin_shape_size == kNCHWShapeSize ? kOpFormat_NCHW : kOpFormat_ND;
  } else if (format == kOpFormat_FRAC_Z) {
    return kOpFormat_FRACTAL_Z;
  } else {
    return format;
  }
}

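// Walk every compute node in the fusion scope, rebuild its (reordered) input
// layer, and collect the inputs that need special fusion handling.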
bool TbeAdapter::GetSpecDataInput(const FusionScopeInfo &fusion_scope_info,
                                  std::map<const AnfNodePtr, tbe::FusionDataType> *spec_data_input) {
  MS_EXCEPTION_IF_NULL(spec_data_input);
  auto input_nodes = fusion_scope_info.input_nodes;
  auto compute_nodes = fusion_scope_info.compute_nodes;
  for (const auto &compute_node : compute_nodes) {
    MS_EXCEPTION_IF_NULL(compute_node);
    std::vector<mindspore::AnfNodePtr> layer = {};
    std::vector<mindspore::AnfNodePtr> reorder_layer = {};
    auto op_name = AnfAlgo::GetCNodeName(compute_node);
    auto ccompute_node = compute_node->cast<CNodePtr>();
    if (ccompute_node == nullptr) {
      MS_LOG(WARNING) << "Fusion error: fusion compute node must be cnode, but the node is "
                      << compute_node->DebugString();
      return false;
    }
    for (size_t i = 1; i < ccompute_node->inputs().size(); ++i) {
      auto input = ccompute_node->input(i);
      auto find_iter = std::find(input_nodes.begin(), input_nodes.end(), input);
      if (find_iter != input_nodes.end()) {
        layer.emplace_back((*find_iter));
      }
    }
    InputOrderPass<AnfNodePtr>(compute_node, layer, &reorder_layer);
    if (IsSpecialFusionComputeNode(compute_nodes)) {
      if (!GetSpecInputLayers(op_name, reorder_layer, spec_data_input)) {
        return false;
      }
    }
  }
  return true;
}
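
// For DynamicRNN/DynamicGRUV2, an input is a placeholder when its index
// appears in the node's "placeholder_index" attribute.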
bool TbeAdapter::IsPlaceHolderInput(const AnfNodePtr &node, const OpIOInfoPtr &input_ptr) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(input_ptr);
  static std::set<std::string> node_set = {kDynamicRNNOpName, kDynamicGRUV2OpName};
  auto cnode_name = AnfAlgo::GetCNodeName(node);
  if (node_set.find(cnode_name) == node_set.end()) {
    return false;
  }
  auto cnode = node->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (AnfAlgo::HasNodeAttr("placeholder_index", cnode)) {
    auto none_index = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "placeholder_index");
    return find(none_index.begin(), none_index.end(), input_ptr->index()) != none_index.end();
  } else {
    MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << " doesn't have attribute placeholder_index.";
  }
}
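
// Pre-pass for Cast: replace the dst_type attr with its int32 type code before
// the generic attr json is built (CastAttrJsonPost converts it back to a string).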
void TbeAdapter::CastAttrJsonPrePass(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *op_info_attrs,
                                     nlohmann::json *attrs_json) {
  MS_EXCEPTION_IF_NULL(anf_node);
  MS_EXCEPTION_IF_NULL(attrs_json);
  if (AnfAlgo::GetCNodeName(anf_node) != kCastOpName) {
    return;
  }
  if (op_info_attrs->size() != 1) {
    MS_LOG(INFO) << "Cast node should have exactly one attr: dst_type";
    return;
  }
  auto attr_name = (*op_info_attrs)[0]->name();
  auto type_ptr = std::make_shared<TensorType>(TypeIdToType(AnfAlgo::GetOutputDeviceDataType(anf_node, 0)));
  MS_EXCEPTION_IF_NULL(type_ptr);
  auto type_element = type_ptr->element();
  MS_EXCEPTION_IF_NULL(type_element);
  auto dtype = type_element->ToString();
  auto dst_type_value = TypeStrToDstType(dtype);
  nlohmann::json attr_obj;
  attr_obj[kJValue] = dst_type_value;
  attr_obj[kJValid] = true;
  attr_obj[kJDtype] = "int32";
  attr_obj[kJName] = attr_name;
  attrs_json->push_back(attr_obj);
  op_info_attrs->clear();
}

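// Post-pass for Cast: convert the numeric dst_type code produced by
// CastAttrJsonPrePass back to the dtype string TBE expects.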
void TbeAdapter::CastAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *attrs_json) {
  if (AnfAlgo::GetCNodeName(anf_node) != kCastOpName) {
    return;
  }
  std::map<int, std::string> dst_type_map{{0, "float32"}, {1, "float16"}, {2, "int8"}, {3, "int32"},
                                          {4, "uint8"},   {10, "uint64"}, {12, "bool"}};
  auto type_id = GetJsonValue<int>(attrs_json->at(0), kJValue);
  auto iter = dst_type_map.find(type_id);
  if (iter != dst_type_map.end()) {
    attrs_json->at(0)[kJValue] = iter->second;
  } else {
    MS_LOG(EXCEPTION) << "Invalid type:" << type_id;
  }
}
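
// LayerNorm's TBE kernel takes no epsilon attr, so drop it from the attrs json.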
void TbeAdapter::LayerNormAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *attrs_json) {
  MS_EXCEPTION_IF_NULL(anf_node);
  MS_EXCEPTION_IF_NULL(attrs_json);
  if (AnfAlgo::GetCNodeName(anf_node) == parallel::LAYER_NORM) {
    nlohmann::json new_attrs_json;
    for (auto &json_item : *attrs_json) {
      if (GetJsonValue<std::string>(json_item, kJName) == kAttrEpsilon) {
        continue;
      }
      new_attrs_json.push_back(json_item);
    }
    *attrs_json = new_attrs_json;
  }
}
}  // namespace tbe
}  // namespace kernel
}  // namespace mindspore