1 /**
2 * Copyright 2020-2022 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define USE_DEPRECATED_API
18 #include "tools/optimizer/graph/lite_tensor_extractor.h"
19 #include <memory>
20 #include <vector>
21 #include "mindspore/core/ops/nn_optimizer_ops.h"
22 #include "mindspore/core/ops/nn_ops.h"
23 #include "mindspore/core/ops/lite_ops.h"
24 #include "src/tensorlist.h"
25 #include "tools/optimizer/common/format_utils.h"
26 #include "utils/ms_utils_secure.h"
27 #include "nnacl/op_base.h"
28
29 namespace mindspore {
30 namespace opt {
31 namespace {
32 constexpr int kElementShapeIndex = 1;
33 constexpr int kElementNumOffset = 2;
34 constexpr int kBasicInfoMinSize = 3;
// Validates that a serialized TensorList blob (interpreted as an int array) is
// internally consistent before it is handed to TensorList::Decode.
// Layout assumed by the checks below (int-indexed):
//   data[kElementShapeIndex]                       : rank of the element shape
//   data[data[kElementShapeIndex] + kElementNumOffset] : element count
//   followed by (rank + 1) ints per shape group — TODO confirm against
//   TensorList::Decode.
// Returns true for an empty blob (nothing to validate) or a consistent one.
bool CheckTensorListIsValid(const std::vector<uint8_t> &tensorlist_data) {
  if (tensorlist_data.empty()) {
    return true;
  }
  // Number of complete int fields available in the blob.
  auto basic_data_size = tensorlist_data.size() / sizeof(int);
  auto *data = reinterpret_cast<const int *>(tensorlist_data.data());
  // Minimum header is kBasicInfoMinSize ints.
  if (basic_data_size < static_cast<size_t>(kBasicInfoMinSize)) {
    MS_LOG(ERROR) << "tensorlist data length illegal, which should be at least 3, now is " << basic_data_size;
    return false;
  }
  // The rank must be non-negative and must not overflow when added to the
  // header size; the overflow check guards the additions below.
  if (data[kElementShapeIndex] < 0 || INT_ADD_OVERFLOW(data[kElementShapeIndex], kBasicInfoMinSize)) {
    MS_LOG(ERROR) << "tensorlist data length is too big, INT add overflow.";
    return false;
  }
  // The blob must at least contain the header plus the element-shape dims,
  // otherwise the element-count read below would be out of bounds.
  if (static_cast<size_t>((data[kElementShapeIndex] + kBasicInfoMinSize)) > basic_data_size) {
    MS_LOG(ERROR) << "tensorlist data length illegal. current tensorlist data length should be at least "
                  << (data[kElementShapeIndex] + kBasicInfoMinSize) << ", but now is " << basic_data_size;
    return false;
  }
  // Element count is stored kElementNumOffset ints past the rank field.
  auto element_num = data[data[kElementShapeIndex] + kElementNumOffset];
  if (element_num > 0 && INT_ADD_OVERFLOW(element_num, 1)) {
    MS_LOG(ERROR) << "tensorlist data length is too big, INT add overflow.";
    return false;
  }
  // Each shape group occupies (rank + 1) ints; one group for the element shape
  // itself plus one per element (none extra when element_num < 0).
  auto shape_once = data[kElementShapeIndex] + 1;
  auto shape_group_num = element_num < 0 ? 1 : element_num + 1;
  if (INT_MUL_OVERFLOW(shape_once, shape_group_num)) {
    MS_LOG(ERROR) << "tensorlist data length is too big, INT mul overflow.";
    return false;
  }
  auto shape_info_size = shape_once * shape_group_num;
  if (INT_ADD_OVERFLOW(shape_info_size, kElementNumOffset)) {
    MS_LOG(ERROR) << "tensorlist data length is too big, INT add overflow.";
    return false;
  }
  // Finally, the blob length must match the computed layout exactly.
  size_t real_data_size = static_cast<size_t>(shape_info_size + kElementNumOffset);
  if (real_data_size != basic_data_size) {
    MS_LOG(ERROR) << "current tensorlist data length should be " << real_data_size << ", but now is "
                  << basic_data_size;
    return false;
  }
  return true;
}
78
// Builds a lite::Tensor — or a lite::TensorList when data_info holds
// kObjectTypeTensorType — and attaches the payload carried by |data_info|.
// Returns nullptr on allocation failure, invalid tensorlist data, or copy error.
TensorPtr ConvertToLiteTensor(const lite::DataInfo &data_info) {
  auto tensor_category = lite::TensorCategory(lite::NodeType(data_info.node_type_), data_info.shape_.size(),
                                              TypeId(data_info.data_type_), data_info.data_.size());
  TensorPtr tensor;
  if (data_info.data_type_ != kObjectTypeTensorType) {
    tensor = std::make_shared<lite::Tensor>(TypeId(data_info.data_type_), data_info.shape_,
                                            (mindspore::Format)data_info.format_, tensor_category);
  } else {
    tensor = std::make_shared<lite::TensorList>(data_info.shape_, std::vector<int>(), tensor_category);
  }
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "new a lite tensor failed.";
    return nullptr;
  }
  auto tensor_size = data_info.data_.size();
  if (tensor_size > 0) {
    if (data_info.data_type_ == kObjectTypeTensorType) {
      // TensorList payload: validate the serialized layout, then decode it.
      auto tensor_list = std::static_pointer_cast<lite::TensorList>(tensor);
      if (!CheckTensorListIsValid(data_info.data_)) {
        MS_LOG(ERROR) << "tensor list is invalid.";
        return nullptr;
      }
      if (tensor_list->Decode(reinterpret_cast<const int *>(data_info.data_.data()), tensor_size) != RET_OK) {
        MS_LOG(ERROR) << "Decode tensorlist data failed.";
        return nullptr;
      }
    } else {
      // Plain tensor payload: copy into a freshly malloc'ed buffer; set_data
      // hands the buffer to the tensor (own_data is not cleared here, unlike
      // the external-pointer path below).
      auto tensor_data = malloc(tensor_size);
      if (tensor_data == nullptr) {
        MS_LOG(ERROR) << "tensor_data is nullptr.";
        return nullptr;
      }
      if (memcpy_s(tensor_data, tensor_size, data_info.data_.data(), tensor_size) != EOK) {
        // memcpy failed — release the buffer ourselves since set_data never ran.
        free(tensor_data);
        MS_LOG(ERROR) << "memcpy data error.";
        return nullptr;
      }
      tensor->set_data(tensor_data);
    }
  }

  // No inline payload but an external pointer: reference it without ownership.
  if (tensor_size == 0 && data_info.data_ptr_ != nullptr) {
    tensor->set_data(data_info.data_ptr_);
    tensor->set_own_data(false);
  }
  return tensor;
}
126
GetCNodeTensorListVarInput(const lite::DataInfo & data_info)127 TensorPtr GetCNodeTensorListVarInput(const lite::DataInfo &data_info) {
128 auto tensor_list = std::make_shared<lite::TensorList>(data_info.shape_, std::vector<int>{});
129 if (tensor_list == nullptr) {
130 MS_LOG(ERROR) << "new a lite tensor list failed";
131 return nullptr;
132 }
133 if (data_info.data_.empty()) {
134 return tensor_list;
135 }
136 if (!CheckTensorListIsValid(data_info.data_)) {
137 MS_LOG(ERROR) << "tensor list is invalid.";
138 return nullptr;
139 }
140 auto status = tensor_list->Decode(reinterpret_cast<const int *>(data_info.data_.data()), data_info.data_.size());
141 if (status != RET_OK) {
142 MS_LOG(ERROR) << "decode tensor list failed.";
143 return nullptr;
144 }
145 return tensor_list;
146 }
147
CreateTensorFromData(const lite::DataInfo & data_info,const bool & has_inferred,const mindspore::Format & format)148 TensorPtr CreateTensorFromData(const lite::DataInfo &data_info, const bool &has_inferred,
149 const mindspore::Format &format) {
150 if (data_info.data_type_ == static_cast<int>(kObjectTypeTensorType)) {
151 auto tensor = GetCNodeTensorListVarInput(data_info);
152 MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "tensor is nullptr.");
153 tensor->set_format((Format)(format));
154 if (!has_inferred) {
155 tensor->set_shape({-1});
156 }
157 return tensor;
158 } else {
159 auto tensor = std::make_shared<lite::Tensor>(TypeId(data_info.data_type_), data_info.shape_);
160 MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "tensor is nullptr.");
161 tensor->set_format((Format)(format));
162 if (!has_inferred) {
163 tensor->set_shape({-1});
164 }
165 return tensor;
166 }
167 }
168 } // namespace
169
GetCNodeConstInputToAbstract(const CNodePtr & cnode,const AbstractBasePtrList & abs_list,converter::FmkType fmk_type,bool train_flag)170 int LiteTensorExtractor::GetCNodeConstInputToAbstract(const CNodePtr &cnode, const AbstractBasePtrList &abs_list,
171 converter::FmkType fmk_type, bool train_flag) {
172 MS_ASSERT(cnode != nullptr && const_ms_inputs != nullptr);
173 for (size_t i = 1; i < cnode->size(); ++i) {
174 if (utils::isa<CNodePtr>(cnode->input(i))) {
175 continue;
176 }
177 STATUS status;
178 lite::DataInfo data_info;
179 if (utils::isa<ParameterPtr>(cnode->input(i))) {
180 status = lite::FetchDataFromParameterNode(cnode, i, fmk_type, &data_info, true);
181 } else {
182 status = lite::FetchDataFromValueNode(cnode, i, fmk_type, train_flag, &data_info, true);
183 }
184 if (status == lite::RET_NO_CHANGE) {
185 continue;
186 }
187 if (status != RET_OK) {
188 MS_LOG(ERROR) << "fetch const input data failed.";
189 return status;
190 }
191
192 auto abstract = abs_list[i - 1];
193 if (abstract->isa<abstract::AbstractScalar>()) {
194 continue;
195 }
196
197 if (!utils::isa<abstract::AbstractTensor>(abstract)) {
198 if (utils::isa<abstract::AbstractScalar>(abstract)) {
199 continue;
200 }
201 if (utils::isa<abstract::AbstractSequence>(abstract)) {
202 continue;
203 }
204 MS_LOG(ERROR) << "abstract is not a AbstractTensor";
205 return RET_ERROR;
206 }
207 auto shape_value = abstract->BuildValue();
208 if (!shape_value->isa<tensor::Tensor>()) {
209 if (SetAbstractTensorInfo(abstract) != RET_OK) {
210 MS_LOG(ERROR) << "SetAbstractTensorInfo failed";
211 return RET_ERROR;
212 }
213 shape_value = abstract->BuildValue();
214 }
215 auto input_tensor = shape_value->cast<tensor::TensorPtr>();
216 MS_CHECK_FALSE(input_tensor == nullptr, RET_ERROR);
217 if (input_tensor->data().const_data() != nullptr) {
218 MS_LOG(DEBUG) << "abstract already have const data.";
219 continue;
220 }
221 if (data_info.data_.size() == 0) {
222 continue;
223 }
224
225 if (input_tensor->Size() > 0 && input_tensor->Size() == data_info.data_.size()) {
226 if (EOK != common::huge_memcpy(reinterpret_cast<uint8_t *>(input_tensor->data_c()), input_tensor->Size(),
227 data_info.data_.data(), data_info.data_.size())) {
228 MS_LOG(ERROR) << "memcpy_s failed.";
229 return RET_ERROR;
230 }
231 } else {
232 MS_LOG(ERROR) << "the size of tensor data: {" << input_tensor->Size() << "} is not equal to the size of node: {"
233 << data_info.data_.size() << "}";
234 return RET_ERROR;
235 }
236 }
237 return RET_OK;
238 }
239
GetCNodeConstInputs(const CNodePtr & cnode,const converter::FmkType & fmk_type,const bool & train_flag,const bool & copy_data,std::vector<TensorPtr> * const_ms_inputs)240 int LiteTensorExtractor::GetCNodeConstInputs(const CNodePtr &cnode, const converter::FmkType &fmk_type,
241 const bool &train_flag, const bool ©_data,
242 std::vector<TensorPtr> *const_ms_inputs) {
243 MS_CHECK_TRUE_MSG(cnode != nullptr, RET_ERROR, "cnode is nullptr.");
244 MS_CHECK_TRUE_MSG(const_ms_inputs != nullptr, RET_ERROR, "const_ms_inputs is nullptr.");
245 for (size_t i = 1; i < cnode->size(); ++i) {
246 if (utils::isa<CNodePtr>(cnode->input(i))) {
247 continue;
248 }
249 if (GetCNodeConstInput(cnode, i, fmk_type, train_flag, copy_data, const_ms_inputs) != RET_OK) {
250 MS_LOG(ERROR) << "get const inputs failed.";
251 return RET_ERROR;
252 }
253 }
254 return RET_OK;
255 }
256
GetCNodeConstInput(const CNodePtr & cnode,const size_t & index,const converter::FmkType & fmk_type,const bool & train_flag,const bool & copy_data,std::vector<TensorPtr> * const_ms_inputs)257 int LiteTensorExtractor::GetCNodeConstInput(const CNodePtr &cnode, const size_t &index,
258 const converter::FmkType &fmk_type, const bool &train_flag,
259 const bool ©_data, std::vector<TensorPtr> *const_ms_inputs) {
260 MS_CHECK_TRUE_MSG(cnode != nullptr, RET_ERROR, "cnode is nullptr.");
261 MS_CHECK_TRUE_MSG(const_ms_inputs != nullptr, RET_ERROR, "const_ms_inputs is nullptr.");
262 if (utils::isa<CNodePtr>(cnode->input(index))) {
263 return RET_OK;
264 }
265 STATUS status;
266 lite::DataInfo data_info;
267 if (utils::isa<ParameterPtr>(cnode->input(index))) {
268 status = lite::FetchDataFromParameterNode(cnode, index, fmk_type, &data_info, copy_data);
269 } else {
270 status = lite::FetchDataFromValueNode(cnode, index, fmk_type, train_flag, &data_info, copy_data);
271 }
272 if (status == lite::RET_NO_CHANGE) {
273 return RET_OK;
274 }
275 if (status != RET_OK) {
276 MS_LOG(ERROR) << "fetch const input data failed.";
277 return status;
278 }
279 auto tensor = ConvertToLiteTensor(data_info);
280 if (tensor == nullptr) {
281 MS_LOG(ERROR) << "Create lite tensor from data info failed.";
282 return RET_ERROR;
283 }
284 const_ms_inputs->push_back(tensor);
285 return RET_OK;
286 }
287
GetCNodeVarInput(const CNodePtr & cnode,const size_t & index,std::vector<TensorPtr> * var_ms_inputs)288 int LiteTensorExtractor::GetCNodeVarInput(const CNodePtr &cnode, const size_t &index,
289 std::vector<TensorPtr> *var_ms_inputs) {
290 MS_CHECK_TRUE_MSG(cnode != nullptr, RET_ERROR, "cnode is nullptr.");
291 MS_CHECK_TRUE_MSG(var_ms_inputs != nullptr, RET_ERROR, "var_ms_inputs is nullptr.");
292 if (!utils::isa<CNodePtr>(cnode->input(index))) {
293 MS_LOG(ERROR) << "The " << index << "th input for " << cnode->fullname_with_scope() << "should be cnode.";
294 return RET_ERROR;
295 }
296
297 bool has_inferred{false};
298 auto ret = DetermineCertainVarInputHasInferred(cnode, index, &has_inferred);
299 MS_CHECK_TRUE_MSG(ret == RET_OK, RET_ERROR, "determine infer flag failed.");
300 Format format{mindspore::NHWC};
301 ret = opt::DetermineCertainVarInputFormat(cnode, index, &format);
302 MS_CHECK_TRUE_MSG(ret == RET_OK, RET_ERROR, "determine format failed.");
303
304 auto abstract = opt::GetCNodeInputAbstract(cnode, index);
305 MS_CHECK_TRUE_MSG(abstract != nullptr, RET_ERROR, "abstract is nullptr.");
306 if (utils::isa<abstract::AbstractTensor>(abstract)) {
307 lite::DataInfo data_info;
308 if (lite::FetchDataFromAbstract(abstract, &data_info) != RET_OK) {
309 MS_LOG(ERROR) << "FetchDataFromAbstract failed.";
310 return RET_ERROR;
311 }
312 auto tensor = CreateTensorFromData(data_info, has_inferred, format);
313 MS_CHECK_TRUE_MSG(tensor != nullptr, RET_ERROR, "CreateTensorFromData failed.");
314 var_ms_inputs->emplace_back(tensor);
315 } else if (utils::isa<abstract::AbstractTuple>(abstract)) {
316 auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(abstract);
317 MS_CHECK_TRUE_MSG(tuple != nullptr, RET_ERROR, "tuple is nullptr.");
318 for (const auto &element : tuple->elements()) {
319 lite::DataInfo data_info;
320 if (lite::FetchDataFromAbstract(element, &data_info) != RET_OK) {
321 MS_LOG(ERROR) << "FetchDataFromAbstract failed.";
322 return RET_ERROR;
323 }
324 auto tensor = CreateTensorFromData(data_info, has_inferred, format);
325 MS_CHECK_TRUE_MSG(tensor != nullptr, RET_ERROR, "CreateTensorFromData failed.");
326 var_ms_inputs->emplace_back(tensor);
327 }
328 }
329 return RET_OK;
330 }
331
ModifyLiteDynamicShapeToOps(const AbstractBasePtr & abstract)332 int ModifyLiteDynamicShapeToOps(const AbstractBasePtr &abstract) {
333 // change Lite dynamic shape {-1} to core/ops dynamic rank {-2}, will be removed after calling core/infer
334 ShapeVector shape;
335 if (opt::FetchShapeFromAbstract(abstract, &shape) != RET_OK) {
336 MS_LOG(ERROR) << "FetchShapeFromAbstract failed.";
337 return RET_ERROR;
338 }
339 if (shape.size() == 1 && shape[0] == -1) {
340 auto dynamic_shape = std::make_shared<abstract::Shape>(std::vector<int64_t>{abstract::Shape::kShapeRankAny});
341 abstract->set_shape(dynamic_shape);
342 }
343 return RET_OK;
344 }
345
// Collects a cloned abstract for every real input of |cnode|, with lite's
// dynamic-shape marker {-1} rewritten to core/ops' dynamic-rank marker (see
// ModifyLiteDynamicShapeToOps). The cnode's input list is temporarily rewritten
// (depend/make-tuple/monad inputs stripped) and restored before every return,
// success or failure.
int LiteTensorExtractor::GetCNodeInputAbstractLists(const CNodePtr &cnode, AbstractBasePtrList *abs_list) {
  MS_ASSERT(cnode != nullptr);
  MS_ASSERT(abs_list != nullptr);
  // Snapshot the inputs so they can be restored after the in-place rewrites.
  auto origin_inputs = cnode->inputs();
  if (lite::RemoveIfDepend(cnode) != RET_OK) {
    MS_LOG(ERROR) << "remove depend failed.";
    cnode->set_inputs(origin_inputs);
    return RET_ERROR;
  }
  if (lite::RemoveIfMakeTuple(cnode)) {
    MS_LOG(ERROR) << "remove makeTuple failed.";
    cnode->set_inputs(origin_inputs);
    return RET_ERROR;
  }
  RemoveIfMonad(cnode);
  abs_list->clear();
  abs_list->reserve(cnode->size());
  for (size_t index = 1; index < cnode->size(); index++) {
    auto node = cnode->input(index);
    auto abs = node->abstract();
    if (abs == nullptr) {
      // Value nodes may lack an abstract; derive one from the stored value.
      if (utils::isa<ValueNodePtr>(node)) {
        abs = node->cast<ValueNodePtr>()->value()->ToAbstract();
      } else {
        MS_LOG(ERROR) << "abstract is nullptr.";
        cnode->set_inputs(origin_inputs);
        return RET_ERROR;
      }
    }
    // Clone so the dynamic-shape rewrite below does not mutate the graph.
    auto abstract = abs->Clone();
    if (abstract == nullptr) {
      MS_LOG(ERROR) << "CNode " << cnode->fullname_with_scope() << " get nullptr input abstract.";
      cnode->set_inputs(origin_inputs);
      return RET_ERROR;
    }

    if (utils::isa<abstract::AbstractTensor>(abstract)) {
      auto ret = ModifyLiteDynamicShapeToOps(abstract);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "ModifyLiteDynamicShapeToOps failed.";
        cnode->set_inputs(origin_inputs);
        return RET_ERROR;
      }
    } else if (utils::isa<abstract::AbstractTuple>(abstract)) {
      auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(abstract);
      MS_CHECK_TRUE_MSG(tuple != nullptr, RET_ERROR, "tuple is nullptr.");
      // Rewrite each tensor element of the tuple the same way.
      for (const auto &element : tuple->elements()) {
        if (utils::isa<abstract::AbstractTensor>(element)) {
          auto ret = ModifyLiteDynamicShapeToOps(element);
          if (ret != RET_OK) {
            MS_LOG(ERROR) << "ModifyLiteDynamicShapeToOps failed.";
            cnode->set_inputs(origin_inputs);
            return RET_ERROR;
          }
        }
      }
    }
    abs_list->push_back(abstract);
  }
  cnode->set_inputs(origin_inputs);
  return RET_OK;
}
408
GetCNodeInputTensors(const CNodePtr & cnode,std::vector<TensorPtr> * inputs,converter::FmkType fmk_type,bool train_flag,bool copy_data)409 int LiteTensorExtractor::GetCNodeInputTensors(const CNodePtr &cnode, std::vector<TensorPtr> *inputs,
410 converter::FmkType fmk_type, bool train_flag, bool copy_data) {
411 MS_ASSERT(cnode != nullptr);
412 MS_ASSERT(inputs != nullptr);
413 auto origin_inputs = cnode->inputs();
414 if (lite::RemoveIfDepend(cnode) != RET_OK) {
415 MS_LOG(ERROR) << "remove depend failed.";
416 return RET_ERROR;
417 }
418 if (lite::RemoveIfMakeTuple(cnode)) {
419 MS_LOG(ERROR) << "remove makeTuple failed.";
420 return RET_ERROR;
421 }
422 RemoveIfMonad(cnode);
423
424 for (size_t i = 1; i < cnode->size(); ++i) {
425 if (utils::isa<CNodePtr>(cnode->input(i))) {
426 std::vector<TensorPtr> var_inputs;
427 if (GetCNodeVarInput(cnode, i, &var_inputs) != RET_OK) {
428 MS_LOG(ERROR) << "get var inputs failed.";
429 cnode->set_inputs(origin_inputs);
430 return RET_ERROR;
431 }
432 inputs->insert(inputs->end(), var_inputs.begin(), var_inputs.end());
433 } else {
434 std::vector<TensorPtr> const_inputs;
435 if (GetCNodeConstInput(cnode, i, fmk_type, train_flag, copy_data, &const_inputs) != RET_OK) {
436 MS_LOG(ERROR) << "get const inputs failed.";
437 cnode->set_inputs(origin_inputs);
438 return RET_ERROR;
439 }
440 inputs->insert(inputs->end(), const_inputs.begin(), const_inputs.end());
441 }
442 }
443 cnode->set_inputs(origin_inputs);
444 return RET_OK;
445 }
446
GetCNodeOutputTensors(const CNodePtr & cnode,std::vector<TensorPtr> * outputs,bool train_flag)447 int LiteTensorExtractor::GetCNodeOutputTensors(const CNodePtr &cnode, std::vector<TensorPtr> *outputs,
448 bool train_flag) {
449 MS_ASSERT(cnode != nullptr);
450 MS_ASSERT(outputs != nullptr);
451 std::vector<lite::DataInfo> data_infos;
452 if (utils::isa<abstract::AbstractTuple>(cnode->abstract())) {
453 auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(cnode->abstract());
454 if (tuple == nullptr) {
455 MS_LOG(ERROR) << "tuple is nullptr.";
456 return RET_ERROR;
457 }
458 auto elements = tuple->elements();
459 for (size_t i = 0; i < elements.size(); i++) {
460 lite::DataInfo data_info;
461 data_info.node_type_ = lite::NodeType_CNode;
462 if (train_flag) {
463 data_infos.emplace_back(data_info);
464 if (CheckPrimitiveType(cnode, prim::kPrimConv2DFusion) || CheckPrimitiveType(cnode, prim::kPrimAdam)) {
465 break;
466 }
467 } else {
468 if (!utils::isa<abstract::AbstractTensorPtr>(elements[i])) {
469 MS_LOG(ERROR) << "abstract is not AbstractTensor.";
470 return RET_ERROR;
471 }
472 auto type = kNumberTypeFloat32;
473 if (utils::isa<abstract::AbstractTensorPtr>(elements[i])) {
474 auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(elements[i]);
475 MS_CHECK_TRUE_MSG(abstract_tensor != nullptr, RET_ERROR, "Cast to abstract tensor failed!");
476 MS_CHECK_TRUE_RET(abstract_tensor->element() != nullptr, lite::RET_NULL_PTR);
477 auto typePtr = abstract_tensor->element()->GetTypeTrack();
478 MS_CHECK_TRUE_RET(typePtr != nullptr, lite::RET_NULL_PTR);
479 type = typePtr->type_id();
480 }
481 data_info.data_type_ = type;
482 data_infos.emplace_back(data_info);
483 if (CheckPrimitiveType(cnode, prim::kPrimConv2DFusion)) {
484 break;
485 }
486 }
487 }
488 } else {
489 lite::DataInfo data_info;
490 auto type = kNumberTypeFloat32;
491 if (utils::isa<abstract::AbstractTensorPtr>(cnode->abstract())) {
492 auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(cnode->abstract());
493 MS_CHECK_TRUE_MSG(abstract_tensor != nullptr, RET_ERROR, "Cast to abstract tensor failed!");
494 MS_CHECK_TRUE_RET(abstract_tensor->element() != nullptr, lite::RET_NULL_PTR);
495 auto typePtr = abstract_tensor->element()->GetTypeTrack();
496 MS_CHECK_TRUE_RET(typePtr != nullptr, lite::RET_NULL_PTR);
497 type = typePtr->type_id();
498 }
499 data_info.data_type_ = type;
500 data_info.node_type_ = lite::NodeType_CNode;
501 data_infos.emplace_back(data_info);
502 }
503 for (const auto &data_info : data_infos) {
504 auto tensor = ConvertToLiteTensor(data_info);
505 if (tensor == nullptr) {
506 MS_LOG(ERROR) << "Create lite tensor from data info failed.";
507 return RET_ERROR;
508 }
509 outputs->push_back(tensor);
510 }
511 return RET_OK;
512 }
513 } // namespace opt
514 } // namespace mindspore
515