/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_DELEGATE_DELEGATE_UTILS
#define MINDSPORE_LITE_SRC_DELEGATE_DELEGATE_UTILS
#include <algorithm>
#include <vector>
#include "include/ms_tensor.h"
#include "include/api/delegate.h"
#include "src/common/log_adapter.h"
#include "include/errorcode.h"

namespace mindspore::lite {
bool IsSubGraphInputTensor(const std::vector<mindspore::MSTensor> &inputs, mindspore::MSTensor input);

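// Collects the input tensors of the subgraph formed by `ops`: tensors that hold no constant data
// and are not produced as an output by any op inside the subgraph.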
template <typename T>
std::vector<mindspore::MSTensor> GetGraphInTensors(std::vector<T *> ops) {
  std::vector<mindspore::MSTensor> inputs;
  auto is_op_output = [&](mindspore::MSTensor tensor) -> bool {
    for (auto op : ops) {
      auto out_tensors = op->outputs();
      if (find(out_tensors.begin(), out_tensors.end(), tensor) != out_tensors.end()) {
        return true;
      }
    }
    return false;
  };

  for (auto op : ops) {
    for (auto in_tensor : op->inputs()) {
      if (in_tensor.Data() == nullptr && !is_op_output(in_tensor)) {
        inputs.push_back(in_tensor);
      }
    }
  }
  return inputs;
}

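// Collects the output tensors of the subgraph formed by `ops`: tensors that are not consumed by
// any op inside the subgraph, plus tensors that connect to ops outside the subgraph via out_ops links.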
template <typename T>
std::vector<mindspore::MSTensor> GetGraphOutTensors(const std::vector<T *> &ops) {
  std::vector<mindspore::MSTensor> outputs;
  auto is_op_input = [&](const mindspore::MSTensor tensor) -> bool {
    for (auto op : ops) {
      auto in_tensors = op->inputs();
      if (find(in_tensors.begin(), in_tensors.end(), tensor) != in_tensors.end()) {
        return true;
      }
    }
    return false;
  };

  for (auto op : ops) {
    for (auto out_tensor : op->outputs()) {
      if (!is_op_input(out_tensor)) {
        outputs.push_back(out_tensor);
      }
    }
  }

  for (auto op : ops) {
    for (auto out_op : op->out_ops()) {
      if (find(ops.begin(), ops.end(), out_op) == ops.end()) {
        // visit the out op that is not in the subgraph
        for (auto tensor : op->outputs()) {
          if (find(out_op->inputs().begin(), out_op->inputs().end(), tensor) != out_op->inputs().end() &&
              find(outputs.begin(), outputs.end(), tensor) == outputs.end()) {
            // find the connected tensor
            outputs.push_back(tensor);
            break;
          }
        }
      }
    }
  }
  return outputs;
}

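// Extends GetGraphInTensors: input tensors that carry constant data but are actually produced by
// kernels outside the [from, end] range of the DelegateModel are also treated as subgraph inputs.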
template <typename T>
std::vector<mindspore::MSTensor> GraphInTensors(const std::vector<T *> &ops, DelegateModel<schema::Primitive> *model,
                                                KernelIter from, KernelIter end) {
  auto in_tensors = GetGraphInTensors(ops);
  std::vector<mindspore::MSTensor> all_in_tensors;
  for (auto op : ops) {
    for (auto in_tensor : op->inputs()) {
      if (in_tensor.Data() != nullptr && find(in_tensors.begin(), in_tensors.end(), in_tensor) == in_tensors.end()) {
        all_in_tensors.push_back(in_tensor);
      }
    }
  }

  for (auto iter = model->BeginKernelIterator(); iter != model->EndKernelIterator(); iter++) {
    if (iter >= from && iter <= end) {
      continue;
    }
    // The output of other kernels is the input of the current subgraph kernel.
    for (auto out_tensor : (*iter)->outputs()) {
      if (std::find(all_in_tensors.begin(), all_in_tensors.end(), out_tensor) != all_in_tensors.end()) {
        in_tensors.push_back(out_tensor);
      }
    }
  }
  return in_tensors;
}

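// Extends GetGraphOutTensors: tensors produced inside the subgraph and consumed by kernels outside
// the [from, end] range of the DelegateModel are also exposed as subgraph outputs.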
template <typename T>
std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<T *> &ops, DelegateModel<schema::Primitive> *model,
                                                 KernelIter from, KernelIter end) {
  auto out_tensors = GetGraphOutTensors(ops);
  std::vector<mindspore::MSTensor> all_out_tensors;
  for (auto op : ops) {
    for (auto out_tensor : op->outputs()) {
      if (find(out_tensors.begin(), out_tensors.end(), out_tensor) == out_tensors.end()) {
        all_out_tensors.push_back(out_tensor);
      }
    }
  }

  for (auto iter = model->BeginKernelIterator(); iter != model->EndKernelIterator(); iter++) {
    if (iter >= from && iter <= end) {
      continue;
    }
    // The input of other kernels is the output of the current subgraph kernel.
    for (auto in_tensor : (*iter)->inputs()) {
      if (find(all_out_tensors.begin(), all_out_tensors.end(), in_tensor) != all_out_tensors.end()) {
        out_tensors.push_back(in_tensor);
      }
    }
  }
  return out_tensors;
}

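// Returns the ops in `all_ops` that produce any of `cur_op`'s input tensors (its predecessors).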
template <typename T>
std::vector<T *> FindPreOps(T *cur_op, std::vector<T *> all_ops) {
  std::vector<T *> in_ops;
  for (auto in_tensor : cur_op->inputs()) {
    for (auto op : all_ops) {
      if (find(op->outputs().begin(), op->outputs().end(), in_tensor) != op->outputs().end()) {
        in_ops.push_back(op);
      }
    }
  }
  return in_ops;
}

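// Returns the ops in `all_ops` that consume any of `cur_op`'s output tensors (its successors).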
template <typename T>
std::vector<T *> FindNextOps(T *cur_op, std::vector<T *> all_ops) {
  std::vector<T *> out_ops;
  for (auto out_tensor : cur_op->outputs()) {
    for (auto op : all_ops) {
      if (find(op->inputs().begin(), op->inputs().end(), out_tensor) != op->inputs().end()) {
        out_ops.push_back(op);
      }
    }
  }
  return out_ops;
}

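// Builds the predecessor/successor links for every op in `all_ops` by matching producer and consumer tensors.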
template <typename T>
void FindPreNextOps(std::vector<T *> all_ops) {
  for (auto op : all_ops) {
    auto in_ops = FindPreOps(op, all_ops);
    op->set_in_ops(in_ops);
    auto out_ops = FindNextOps(op, all_ops);
    op->set_out_ops(out_ops);
  }
}

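// Finds the ops that consume the subgraph's input tensors and the ops that produce its output
// tensors; returns RET_ERROR if either boundary set ends up empty.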
template <typename T>
int GetGraphInOutOps(const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs,
                     std::vector<T *> *in_ops, std::vector<T *> *out_ops, const std::vector<T *> &all_ops) {
  for (auto in_tensor : inputs) {
    for (auto op : all_ops) {
      if (find(op->inputs().begin(), op->inputs().end(), in_tensor) != op->inputs().end() &&
          find(in_ops->begin(), in_ops->end(), op) == in_ops->end()) {
        in_ops->push_back(op);
      }
    }
  }
  if (in_ops->empty()) {
    MS_LOG(ERROR) << "Can't find the input ops for npu sub graph.";
    return RET_ERROR;
  }

  for (auto out_tensor : outputs) {
    for (auto op : all_ops) {
      if (find(op->outputs().begin(), op->outputs().end(), out_tensor) != op->outputs().end() &&
          find(out_ops->begin(), out_ops->end(), op) == out_ops->end()) {
        out_ops->push_back(op);
      }
    }
  }
  if (out_ops->empty()) {
    MS_LOG(ERROR) << "Can't find the output ops for npu sub graph.";
    return RET_ERROR;
  }
  return RET_OK;
}
}  // namespace mindspore::lite

#endif  // MINDSPORE_LITE_SRC_DELEGATE_DELEGATE_UTILS