/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/device/executor/dynamic_kernel.h"
#include <vector>
#include <algorithm>
#include <stack>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"
#include "common/trans.h"
#include "pipeline/jit/static_analysis/static_analysis.h"
#include "abstract/dshape.h"
#include "utils/utils.h"
#include "abstract/param_validator.h"

namespace mindspore {
namespace device {
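// Set up dynamic-shape bookkeeping for this kernel: record whether the node
// (and its inputs/outputs) are dynamic shape, and collect the input indices
// that runtime shape inference depends on.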
void DynamicKernel::Initialize() {
  MS_LOG(INFO) << "Init Start";
  auto cnode = cnode_ptr_.lock();
  MS_EXCEPTION_IF_NULL(cnode);
  is_dynamic_shape_ = AnfAlgo::IsDynamicShape(cnode);
  if (!is_dynamic_shape_) {
    MS_LOG(DEBUG) << "cnode is not dynamic shape:" << cnode->fullname_with_scope();
    return;
  }

  is_input_dynamic_shape_ = AnfAlgo::GetBooleanAttr(cnode, kAttrInputIsDynamicShape);
  is_output_dynamic_shape_ = AnfAlgo::GetBooleanAttr(cnode, kAttrOutputIsDynamicShape);

  auto ret = abstract::GetDependsFormMap(cnode);
  if (ret.empty()) {
    MS_LOG(DEBUG) << "No dynamic_shape_depends found";
    return;
  }
  MS_LOG(INFO) << "Have depends";
  (void)std::transform(ret.begin(), ret.end(), std::back_inserter(depend_list_),
                       [](const int64_t &value) { return static_cast<int>(value); });
  MS_LOG(INFO) << "Init End";
}

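// Query the kernel type recorded in the node's selected kernel build info.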
int DynamicKernel::GetKernelType() const { return AnfAlgo::GetKernelType(cnode_ptr_.lock()); }

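// Rebuild depend_tensor_map_: for every depended input index, wrap the
// producer node's device address in a host tensor keyed by that index, so
// shape inference can read the real input values at run time.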
void DynamicKernel::RebuildDependTensor() {
  depend_tensor_map_.clear();
  auto cnode = cnode_ptr_.lock();
  MS_EXCEPTION_IF_NULL(cnode);
  auto context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context);
  for (auto depend : depend_list_) {
    auto pre_node_with_index = AnfAlgo::GetPrevNodeOutput(cnode, depend);
    bool visit_nop_node = !context->get_param<bool>(MS_CTX_ENABLE_MINDRT);
    auto output_addr = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, depend, visit_nop_node);
    std::vector<int64_t> shapes = trans::GetRuntimePaddingShape(pre_node_with_index.first, pre_node_with_index.second);
    auto host_type = AnfAlgo::GetOutputInferDataType(pre_node_with_index.first, pre_node_with_index.second);
    auto out_tensor = std::make_shared<tensor::Tensor>(host_type, shapes);
    MS_EXCEPTION_IF_NULL(out_tensor);
    // The second parameter must be false, otherwise the device address cannot be released and allocated, and the
    // address size will be wrong in the dynamic shape scenario.
    out_tensor->set_device_address(output_addr, false);
    auto ret = depend_tensor_map_.try_emplace(depend, out_tensor);
    if (!ret.second) {
      MS_LOG(EXCEPTION) << "Insert map failed";
    }
  }
}

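// Run shape inference for this kernel at execution time: refresh the shapes
// flowing through nop-node inputs, rebuild the depend-tensor map, then infer
// the cnode's output shape from the updated inputs.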
void DynamicKernel::InferShape() {
  auto cnode = cnode_ptr_.lock();
  MS_EXCEPTION_IF_NULL(cnode);
  MS_LOG(INFO) << "InferShape start, node:" << cnode->fullname_with_scope();
  InferShapeRecursive();
  auto inputs = cnode->inputs();
  if (inputs.empty()) {
    MS_LOG(EXCEPTION) << "Invalid inputs";
  }
  // rebuild depend tensor map for gpu dynamic memory allocation.
  RebuildDependTensor();
  AnfAlgo::InferShape(cnode, &depend_tensor_map_);
}

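// Visit every input of the cnode and, where the producer is a nop node with
// a dynamic shape, propagate shape inference through the nop-node chain.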
void DynamicKernel::InferShapeRecursive() {
  auto cnode = cnode_ptr_.lock();
  MS_EXCEPTION_IF_NULL(cnode);
  auto input_size = AnfAlgo::GetInputTensorNum(cnode);
  for (size_t i = 0; i < input_size; i++) {
    auto input_node_with_index = AnfAlgo::GetPrevNodeOutput(cnode, i);
    auto input_node = input_node_with_index.first;
    MS_EXCEPTION_IF_NULL(input_node);
    InferShapeForNopNode(&input_node);
  }
}

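// Trace back through consecutive nop nodes feeding *input_node, then infer
// their shapes from the innermost node outward so every nop node in the
// chain sees an up-to-date input shape.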
void DynamicKernel::InferShapeForNopNode(AnfNodePtr *input_node) {
  MS_EXCEPTION_IF_NULL(*input_node);
  if (!opt::IsNopNode(*input_node) || !AnfAlgo::IsDynamicShape(*input_node)) {
    MS_LOG(INFO) << "Input node is not a nop node, no need infer.";
    return;
  }
  MS_LOG(INFO) << "Infer shape for nop node.";
  std::stack<AnfNodePtr> nop_road;
  nop_road.push(*input_node);

  /*lint -e716*/
  while (true) {
    auto input_node_with_idx = AnfAlgo::GetPrevNodeOutput(*input_node, 0);
    auto in_node = input_node_with_idx.first;
    MS_EXCEPTION_IF_NULL(in_node);
    if (opt::IsNopNode(in_node)) {
      nop_road.push(in_node);
      *input_node = in_node;
    } else {
      break;
    }
  }
  /*lint +e716*/

  while (!nop_road.empty()) {
    auto nop_node = nop_road.top();
    MS_EXCEPTION_IF_NULL(nop_node);
    AnfAlgo::InferShape(nop_node->cast<CNodePtr>());
    nop_road.pop();
  }
}
}  // namespace device
}  // namespace mindspore