/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/extendrt/graph_compiler/anfnode_tensor_adapter.h"
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <vector>
#include "src/extendrt/graph_compiler/compile_result_builder.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/primitive.h"
#include "ops/sequence_ops.h"
#include "utils/ms_utils_secure.h"

using ShapePtr = mindspore::abstract::ShapePtr;
using AbstractBasePtr = mindspore::abstract::AbstractBasePtr;
using AbstractTensorPtr = mindspore::abstract::AbstractTensorPtr;
using AbstractSequencePtr = mindspore::abstract::AbstractSequencePtr;

namespace mindspore {
namespace lite {
namespace {
AbstractBasePtr GetRealAbstract(const CNodePtr &cnode) {
  // MakeTuple infer is skipped in converter, so cnode->abstract() is nullptr. This function can be deleted once
  // MakeTuple is inferred in the converter.
  if (!IsPrimitive(cnode->input(kIndex0), prim::kPrimMakeTuple)) {
    return cnode->abstract();
  }
  std::vector<abstract::AbstractBasePtr> abstracts;
  for (size_t i = 1; i < cnode->size(); ++i) {
    const auto &input = cnode->inputs()[i];
    MSLITE_CHECK_PTR_RETURN(input, nullptr);
    const auto &abstract = input->abstract();
    MSLITE_CHECK_PTR_RETURN(abstract, nullptr);
    abstracts.emplace_back(abstract);
  }
  return std::make_shared<abstract::AbstractTuple>(abstracts);
}
}  // namespace

InferTensor *TensorAdapter::Convert2Tensor(const ParameterPtr &param_node, Format format) {
  auto adapter = TensorAdapter::Create(param_node, format);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Create tensor-adapter from parameter failed, parameter : " << param_node;
    return nullptr;
  }
  return adapter->ToTensor();
}

InferTensor *TensorAdapter::Convert2Tensor(const ValueNodePtr &value_node, Format format) {
  auto adapter = TensorAdapter::Create(value_node, format);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Create tensor-adapter from value-node failed, value-node : " << value_node;
    return nullptr;
  }
  return adapter->ToTensor();
}

InferTensor *TensorAdapter::Convert2Tensor(const AbstractTensorPtr &abstract, Format format) {
  auto adapter = TensorAdapter::Create(abstract, format);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Create tensor-adapter from abstract-tensor failed, abstract : " << abstract;
    return nullptr;
  }
  return adapter->ToTensor();
}

InferTensor *TensorAdapter::Convert2Tensor(const AbstractBasePtr &abstract, Format format) {
  auto adapter = TensorAdapter::Create(abstract, format);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Create tensor-adapter from abstract-base failed, abstract : " << abstract;
    return nullptr;
  }
  return adapter->ToTensor();
}

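// Materialize an InferTensor from this adapter: narrow the 64-bit shape to int32 (collapsing a rank-unknown shape to
// {-1}), create the tensor with the adapter's name, dtype and data, then hand data ownership over to the tensor.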
InferTensor *TensorAdapter::ToTensor() {
  std::vector<int32_t> int32_shape;
  if (std::any_of(shape_.begin(), shape_.end(),
                  [](const ShapeValueDType &dim) { return dim == abstract::Shape::kShapeRankAny; })) {
    int32_shape.emplace_back(-1);
  } else {
    int32_shape.resize(shape_.size());
    for (size_t i = 0; i < shape_.size(); i++) {
      int32_shape[i] = static_cast<int32_t>(shape_[i]);
    }
  }
  auto *tensor = InferTensor::CreateTensor(name_, data_type_, int32_shape, data_, data_len_);
  if (tensor == nullptr) {
    return nullptr;
  }
  // move data to tensor
  tensor->set_own_data(own_data_);
  own_data_ = false;
  tensor->set_format(format_);
  return tensor;
}

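// Create one tensor per output described by `abstract`: an AbstractSequence yields one tensor per element, a single
// AbstractTensor yields exactly one tensor, and any other abstract kind is rejected.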
std::vector<std::unique_ptr<InferTensor>> TensorAdapter::CreateTensorsFromAbstract(const AbstractBasePtr &abstract,
                                                                                   Format format) {
  if (abstract == nullptr) {
    MS_LOG(ERROR) << "Input `abstract` is nullptr.";
    return {};
  }
  std::vector<std::unique_ptr<InferTensor>> results;
  // multi output abstract
  if (utils::isa<AbstractSequencePtr>(abstract)) {
    auto elements = utils::cast<AbstractSequencePtr>(abstract)->elements();
    for (auto &element : elements) {
      auto tensor = TensorAdapter::Convert2Tensor(element, format);
      if (tensor == nullptr) {
        MS_LOG(ERROR) << "Create tensor from abstract failed, abstract : " << element;
        return {};
      }
      results.emplace_back(std::unique_ptr<InferTensor>(tensor));
    }
    return results;
  }
  // single output abstract
  if (utils::isa<AbstractTensorPtr>(abstract)) {
    auto tensor = TensorAdapter::Convert2Tensor(abstract, format);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "Create tensor from abstract failed, abstract : " << abstract;
      return {};
    }
    results.emplace_back(std::unique_ptr<InferTensor>(tensor));
    return results;
  }
  MS_LOG(ERROR) << "Unsupported abstract: " << abstract;
  return {};
}

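// Create the output tensors of a cnode from its (possibly reconstructed) output abstract and release ownership of
// them to the caller as raw pointers.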
std::vector<InferTensor *> TensorAdapter::Convert2Tensor(const CNodePtr &cnode, Format format) {
  if (cnode == nullptr) {
    MS_LOG(ERROR) << "Input cnode is nullptr.";
    return {};
  }

  auto abstract = GetRealAbstract(cnode);
  if (abstract == nullptr) {
    MS_LOG(ERROR) << "CNode abstract is nullptr.";
    return {};
  }
  auto tmp = TensorAdapter::CreateTensorsFromAbstract(abstract, format);
  if (tmp.empty()) {
    MS_LOG(ERROR) << "Create tensors from output abstract of cnode failed, cnode : " << cnode->fullname_with_scope();
    return {};
  }
  std::vector<InferTensor *> results;
  results.reserve(tmp.size());
  std::transform(tmp.begin(), tmp.end(), std::back_inserter(results),
                 [](std::unique_ptr<InferTensor> &tensor) { return tensor.release(); });
  return results;
}

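// Build an adapter from a Parameter node: dtype/shape come from the parameter's abstract; if the parameter has a
// default value, the adapter is marked const and borrows (does not own) the default tensor's data.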
TensorAdapterPtr TensorAdapter::Create(const ParameterPtr &param_node, Format format) {
  if (param_node == nullptr) {
    MS_LOG(ERROR) << "Input parameter is nullptr.";
    return nullptr;
  }
  ShapeVector shape_vector;
  TypeId data_type = kTypeUnknown;
  auto status = GetDTAndShapeFromParameter(param_node, &data_type, &shape_vector);
  if (status != kSuccess) {
    MS_LOG(ERROR) << "Get data type and shape from param node failed.";
    return nullptr;
  }
  if (data_type == kObjectTypeString) {
    MS_LOG(ERROR) << "Default-param of kObjectTypeString type is not supported.";
    return nullptr;
  }
  auto abstract = param_node->abstract();
  if (abstract == nullptr) {
    MS_LOG(ERROR) << "Abstract of parameter is nullptr.";
    return nullptr;
  }
  auto adapter = std::make_shared<TensorAdapter>(abstract->name());
  adapter->data_type_ = data_type;
  adapter->shape_ = shape_vector;
  adapter->format_ = format;
  adapter->is_const_ = param_node->has_default();
  if (!adapter->is_const_) {
    return adapter;
  }
  auto tensor_info = std::dynamic_pointer_cast<tensor::Tensor>(param_node->default_param());
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "Cast default-param to tensor failed.";
    return nullptr;
  }
  adapter->compress_type_ = tensor_info->compression_type();
  adapter->data_ = tensor_info->data_c();
  adapter->data_len_ = tensor_info->Size();
  adapter->own_data_ = false;
  return adapter;
}

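// Build a const adapter from a ValueNode holding a tensor::Tensor: dtype/shape come from the node's abstract and the
// adapter borrows the tensor's data without taking ownership.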
TensorAdapterPtr TensorAdapter::CreateFromTensorValueNode(const ValueNodePtr &value_node) {
  auto value_abstract = value_node->abstract();
  if (value_abstract == nullptr) {
    MS_LOG(ERROR) << "Abstract of value is nullptr";
    return nullptr;
  }
  auto adapter = TensorAdapter::Create(value_abstract);
  if (adapter == nullptr) {
    MS_LOG(ERROR) << "Create tensor adapter from abstract of valuenode failed, valuenode: "
                  << value_node->fullname_with_scope();
    return nullptr;
  }
  adapter->is_const_ = true;

  auto value = value_node->value();
  if (value == nullptr) {
    MS_LOG(ERROR) << "Value of value-node is nullptr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  auto data = value->cast<tensor::TensorPtr>();
  if (data == nullptr) {
    MS_LOG(ERROR) << "Value of tensor-type value-node is not a Tensor, " << value_node->fullname_with_scope();
    return nullptr;
  }
  adapter->data_ = data->data_c();
  adapter->data_len_ = data->Size();
  adapter->own_data_ = false;
  return adapter;
}

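// Build a const scalar adapter (shape {1}, int32) from a ValueNode holding an Int32Imm; the value is copied into
// newly allocated memory owned by the adapter.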
TensorAdapterPtr TensorAdapter::CreateFromInt32ImmValue(const ValueNodePtr &value_node) {
  MS_ASSERT(value_node != nullptr);
  auto adapter = std::make_shared<TensorAdapter>(value_node->fullname_with_scope());
  adapter->is_const_ = true;
  adapter->data_type_ = kNumberTypeInt32;
  adapter->shape_ = {1};
  auto value = value_node->value();
  if (value == nullptr) {
    MS_LOG(ERROR) << "Value of value-node is nullptr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  auto data = GetValue<int32_t>(value);
  adapter->data_ = malloc(sizeof(int32_t));
  if (adapter->data_ == nullptr) {
    MS_LOG(ERROR) << "malloc const tensor data failed.";
    return nullptr;
  }
  (reinterpret_cast<int32_t *>(adapter->data_))[0] = data;
  adapter->data_len_ = sizeof(int32_t);
  adapter->own_data_ = true;
  return adapter;
}

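// Same as above but for Int64Imm values: a const scalar adapter with shape {1} and int64 data owned by the adapter.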
TensorAdapterPtr TensorAdapter::CreateFromInt64ImmValue(const ValueNodePtr &value_node) {
  MS_ASSERT(value_node != nullptr);
  auto adapter = std::make_shared<TensorAdapter>(value_node->fullname_with_scope());
  adapter->is_const_ = true;
  adapter->data_type_ = kNumberTypeInt64;
  adapter->shape_ = {1};
  auto value = value_node->value();
  if (value == nullptr) {
    MS_LOG(ERROR) << "Value of value-node is nullptr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  auto data = GetValue<int64_t>(value);
  adapter->data_ = malloc(sizeof(int64_t));
  if (adapter->data_ == nullptr) {
    MS_LOG(ERROR) << "malloc const tensor data failed.";
    return nullptr;
  }
  (reinterpret_cast<int64_t *>(adapter->data_))[0] = data;
  adapter->data_len_ = sizeof(int64_t);
  adapter->own_data_ = true;
  return adapter;
}

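// Build a const scalar adapter (shape {1}, bool) from a ValueNode holding a BoolImm.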
TensorAdapterPtr TensorAdapter::CreateFromBoolImmValue(const ValueNodePtr &value_node) {
  MS_ASSERT(value_node != nullptr);
  auto adapter = std::make_shared<TensorAdapter>(value_node->fullname_with_scope());
  adapter->is_const_ = true;
  adapter->data_type_ = kNumberTypeBool;
  adapter->shape_ = {1};
  auto value = value_node->value();
  if (value == nullptr) {
    MS_LOG(ERROR) << "Value of value-node is nullptr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  auto data = value->cast<mindspore::BoolImmPtr>();
  if (data == nullptr) {
    MS_LOG(ERROR) << "Cast value of bool value-node to BoolImm failed, " << value_node->fullname_with_scope();
    return nullptr;
  }
  auto data_value = data->value();
  adapter->data_ = malloc(sizeof(bool));
  if (adapter->data_ == nullptr) {
    MS_LOG(ERROR) << "malloc const tensor data failed.";
    return nullptr;
  }
  (reinterpret_cast<bool *>(adapter->data_))[0] = data_value;
  adapter->data_len_ = sizeof(bool);
  adapter->own_data_ = true;
  return adapter;
}

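// Build a const scalar adapter from a ValueNode holding a Number type object: the tensor stores the TypeId of that
// Number as a single int32 (generic kNumberTypeInt/UInt/Float ids are normalized to their 32-bit variants).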
TensorAdapterPtr TensorAdapter::CreateFromNumberTypeValue(const ValueNodePtr &value_node) {
  MS_ASSERT(value_node != nullptr);
  auto adapter = std::make_shared<TensorAdapter>(value_node->fullname_with_scope());
  adapter->is_const_ = true;
  adapter->data_type_ = kNumberTypeInt32;
  adapter->shape_ = {1};
  auto data = utils::cast<NumberPtr>(value_node->value());
  if (data == nullptr) {
    MS_LOG(ERROR) << "Value of Number type value-node is not a NumberPtr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  TypeId number_type = data->number_type();
  static const std::unordered_map<TypeId, TypeId> TypeToTypeMap = {
    {kNumberTypeInt, kNumberTypeInt32}, {kNumberTypeUInt, kNumberTypeUInt32}, {kNumberTypeFloat, kNumberTypeFloat32}};
  if (TypeToTypeMap.find(number_type) != TypeToTypeMap.end()) {
    number_type = TypeToTypeMap.at(number_type);
  }
  auto number_data = static_cast<int32_t>(number_type);
  adapter->data_ = malloc(sizeof(int32_t));
  if (adapter->data_ == nullptr) {
    MS_LOG(ERROR) << "malloc const tensor data failed.";
    return nullptr;
  }
  (reinterpret_cast<int32_t *>(adapter->data_))[0] = number_data;
  adapter->data_len_ = sizeof(int32_t);
  adapter->own_data_ = true;
  return adapter;
}

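// Build a const 1-D adapter from a ValueNode holding an integer ValueSequence: int/int32 elements become an int32
// tensor, int64 elements become an int64 tensor, and the element data is copied into adapter-owned memory. An empty
// sequence yields an adapter without data.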
TensorAdapterPtr TensorAdapter::CreateFromIntSequenceValue(const ValueNodePtr &value_node) {
  MS_ASSERT(value_node != nullptr);
  auto value_seq = utils::cast<ValueSequencePtr>(value_node->value());
  if (value_seq == nullptr) {
    MS_LOG(ERROR) << "Value of Sequence type value-node is not a ValueSequencePtr, "
                  << value_node->fullname_with_scope();
    return nullptr;
  }
  auto adapter = std::make_shared<TensorAdapter>(value_node->fullname_with_scope());
  adapter->is_const_ = true;
  if (!value_seq->value().empty()) {
    if (value_seq->value().front()->type()->number_type() == kNumberTypeInt32 ||
        value_seq->value().front()->type()->number_type() == kNumberTypeInt) {
      adapter->data_type_ = kNumberTypeInt32;
      auto data = GetValue<std::vector<int32_t>>(value_seq);
      auto data_len = data.size() * sizeof(int32_t);
      adapter->shape_ = {static_cast<int64_t>(data.size())};
      adapter->data_len_ = data_len;
      if (data_len > 0) {
        adapter->data_ = malloc(data_len);
        if (adapter->data_ == nullptr) {
          MS_LOG(ERROR) << "malloc const tensor data failed.";
          return nullptr;
        }
        auto ret = memcpy_s(adapter->data_, data_len, data.data(), data_len);
        if (ret != EOK) {
          MS_LOG(ERROR) << "memcpy const tensor data failed: " << ret;
          free(adapter->data_);
          adapter->data_ = nullptr;
          return nullptr;
        }
        adapter->own_data_ = true;
      } else {
        adapter->data_ = nullptr;
        adapter->own_data_ = false;
      }
    } else if (value_seq->value().front()->type()->number_type() == kNumberTypeInt64) {
      adapter->data_type_ = kNumberTypeInt64;
      auto data = GetValue<std::vector<int64_t>>(value_seq);
      auto data_len = data.size() * sizeof(int64_t);
      adapter->shape_ = {static_cast<int64_t>(data.size())};
      adapter->data_len_ = data_len;
      if (data_len > 0) {
        adapter->data_ = malloc(data_len);
        if (adapter->data_ == nullptr) {
          MS_LOG(ERROR) << "malloc const tensor data failed.";
          return nullptr;
        }
        auto ret = memcpy_s(adapter->data_, data_len, data.data(), data_len);
        if (ret != EOK) {
          MS_LOG(ERROR) << "memcpy const tensor data failed: " << ret;
          free(adapter->data_);
          adapter->data_ = nullptr;
          return nullptr;
        }
        adapter->own_data_ = true;
      } else {
        adapter->data_ = nullptr;
        adapter->own_data_ = false;
      }
    } else {
      MS_LOG(ERROR) << "Only integer ValueSequence is supported.";
      return nullptr;
    }
  }
  return adapter;
}

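// Build an adapter from a ValueNode by dispatching on the concrete value type (Tensor, Int32Imm, Int64Imm, BoolImm,
// ValueSequence or Number), then stamp the requested format on the result.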
TensorAdapterPtr TensorAdapter::Create(const ValueNodePtr &value_node, Format format) {
  MS_ASSERT(value_node != nullptr);
  auto value = value_node->value();
  if (value == nullptr) {
    MS_LOG(ERROR) << "Value of value-node is nullptr, " << value_node->fullname_with_scope();
    return nullptr;
  }
  TensorAdapterPtr adapter;
  if (value->isa<tensor::Tensor>()) {
    adapter = CreateFromTensorValueNode(value_node);
  } else if (value->isa<mindspore::Int32Imm>()) {
    adapter = CreateFromInt32ImmValue(value_node);
  } else if (value->isa<mindspore::Int64Imm>()) {
    adapter = CreateFromInt64ImmValue(value_node);
  } else if (value->isa<mindspore::BoolImm>()) {
    adapter = CreateFromBoolImmValue(value_node);
  } else if (value->isa<mindspore::ValueSequence>()) {
    adapter = CreateFromIntSequenceValue(value_node);
  } else if (value->isa<Number>()) {
    adapter = CreateFromNumberTypeValue(value_node);
  } else {
    MS_LOG(ERROR) << "Unsupported value type: " << value->type();
    return nullptr;
  }
  if (adapter == nullptr) {
    return nullptr;
  }
  adapter->format_ = format;
  return adapter;
}

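// Build an adapter from an AbstractBase by casting it to AbstractTensor and delegating to the overload below.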
TensorAdapterPtr TensorAdapter::Create(const AbstractBasePtr &abs, Format format) {
  auto abs_tensor = utils::cast<AbstractTensorPtr>(abs);
  if (abs_tensor == nullptr) {
    MS_LOG(ERROR) << "Input abstract is not an AbstractTensor.";
    return nullptr;
  }
  return TensorAdapter::Create(abs_tensor, format);
}

TensorAdapterPtr TensorAdapter::Create(const AbstractTensorPtr &abs_tensor, Format format) {
  if (abs_tensor == nullptr) {
    MS_LOG(ERROR) << "Input abstract is not an AbstractTensor.";
    return nullptr;
  }
  ShapeVector shape_vector;
  TypeId data_type = kTypeUnknown;
  auto ret = GetDTAndShapeFromAbTensor(abs_tensor, &data_type, &shape_vector);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "Get data type and shape from abstract tensor failed.";
    return nullptr;
  }
  auto adapter = std::make_shared<TensorAdapter>(abs_tensor->name());
  adapter->data_type_ = data_type;
  adapter->shape_ = shape_vector;
  adapter->format_ = format;
  return adapter;
}

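// Read the element data type and the shape vector out of an AbstractTensor.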
StatusCode TensorAdapter::GetDTAndShapeFromAbTensor(const AbstractTensorPtr &abstract, TypeId *data_type,
                                                    ShapeVector *shape_vector) {
  if (MS_UNLIKELY(abstract == nullptr || data_type == nullptr || shape_vector == nullptr)) {
    MS_LOG(ERROR) << "input argument is nullptr";
    return kLiteInputParamInvalid;
  }
  if (abstract->element() == nullptr) {
    MS_LOG(ERROR) << "`element` of abstract is nullptr";
    return kLiteError;
  }
  auto type_ptr = abstract->element()->GetTypeTrack();
  if (type_ptr == nullptr) {
    MS_LOG(ERROR) << "Type of abstract is nullptr";
    return kLiteError;
  }
  *data_type = type_ptr->type_id();
  if (!utils::isa<ShapePtr>(abstract->BuildShape())) {
    MS_LOG(ERROR) << "Shape of Abstract of parameter should be ShapePtr";
    return kLiteError;
  }
  *shape_vector = utils::cast<ShapePtr>(abstract->BuildShape())->shape();
  return kSuccess;
}

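// Write a data type and shape back into an AbstractTensor; the second overload accepts an int32 shape and converts
// it to a ShapeVector first.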
StatusCode TensorAdapter::SetDTAndShapeFromAbTensor(const TypeId &data_type, const ShapeVector &shape,
                                                    const AbstractTensorPtr &abstract) {
  if (MS_UNLIKELY(abstract == nullptr)) {
    MS_LOG(ERROR) << "input `abstract` is nullptr";
    return kLiteInputParamInvalid;
  }
  if (!utils::isa<ShapePtr>(abstract->BuildShape())) {
    MS_LOG(ERROR) << "Shape of Abstract of parameter should be ShapePtr";
    return kLiteError;
  }
  auto build_shape = utils::cast<ShapePtr>(abstract->BuildShape());
  build_shape->set_shape(shape);
  abstract->set_shape(build_shape);

  if (abstract->element() == nullptr) {
    MS_LOG(ERROR) << "`element` of abstract is nullptr";
    return kLiteError;
  }
  abstract->element()->set_type(TypeIdToType(data_type));
  return kSuccess;
}

StatusCode TensorAdapter::SetDTAndShapeFromAbTensor(const TypeId &data_type, const std::vector<int> &shape,
                                                    const mindspore::abstract::AbstractTensorPtr &abstract) {
  ShapeVector shape_vec;
  shape_vec.resize(shape.size());
  (void)std::transform(shape.begin(), shape.end(), shape_vec.begin(),
                       [](const int &dim) { return static_cast<ShapeValueDType>(dim); });
  return TensorAdapter::SetDTAndShapeFromAbTensor(data_type, shape_vec, abstract);
}

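// Read the data type and shape from a Parameter node via its (tensor) abstract.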
StatusCode TensorAdapter::GetDTAndShapeFromParameter(const ParameterPtr &param_node, TypeId *data_type,
                                                     ShapeVector *shape_vector) {
  MS_ASSERT(param_node != nullptr && data_type != nullptr && shape_vector != nullptr);
  auto abstract_base = param_node->abstract();
  if (abstract_base == nullptr) {
    MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << param_node->name();
    return kLiteError;
  }
  auto abstract_tensor = utils::cast<AbstractTensorPtr>(abstract_base);
  if (abstract_tensor == nullptr) {
    MS_LOG(ERROR) << "Abstract of parameter should be abstract tensor, " << param_node->name();
    return kLiteError;
  }
  return GetDTAndShapeFromAbTensor(abstract_tensor, data_type, shape_vector);
}

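// Copy the data type and shape of an AbstractTensor into a lite InferTensor (shape narrowed to int32, format forced
// to NCHW).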
bool TensorAdapter::SetDTAndShapeFromAbTensorToLiteTensor(const AbstractBasePtr &abstract, InferTensor *tensor) {
  if (!utils::isa<mindspore::abstract::AbstractTensorPtr>(abstract)) {
    MS_LOG(ERROR) << "The abstract should be tensor, but got abstract : " << abstract;
    return false;
  }
  ShapeVector shape_vector;
  TypeId data_type = kTypeUnknown;
  auto ret = TensorAdapter::GetDTAndShapeFromAbTensor(utils::cast<mindspore::abstract::AbstractTensorPtr>(abstract),
                                                      &data_type, &shape_vector);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "Get dtype and shape from abstract failed, abstract : " << abstract;
    return false;
  }
  std::vector<int32_t> int32_shape;
  std::transform(shape_vector.begin(), shape_vector.end(), std::back_inserter(int32_shape),
                 [](const auto &shape) { return static_cast<int32_t>(shape); });
  tensor->set_data_type(data_type);
  tensor->set_shape(int32_shape);
  tensor->set_format(NCHW);
  return true;
}

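// The reverse direction: copy the data type and shape of a lite InferTensor back into an AbstractTensor.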
bool TensorAdapter::SetDTAndShapeFromLiteTensorToAbTensor(const InferTensor &tensor, const AbstractBasePtr &abstract) {
  if (MS_UNLIKELY(abstract == nullptr)) {
    MS_LOG(ERROR) << "Input `abstract` is nullptr";
    return false;
  }
  if (!utils::isa<mindspore::abstract::AbstractTensorPtr>(abstract)) {
    MS_LOG(ERROR) << "The abstract should be tensor, but got abstract : " << abstract;
    return false;
  }

  auto ret = TensorAdapter::SetDTAndShapeFromAbTensor(tensor.data_type(), tensor.shape(),
                                                      utils::cast<mindspore::abstract::AbstractTensorPtr>(abstract));
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "Set dtype and shape to abstract failed, abstract : " << abstract;
    return false;
  }
  return true;
}
}  // namespace lite
}  // namespace mindspore