/**
 * Copyright 2020-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/common/tensor_util.h"
#include <string>
#include <algorithm>
#include <unordered_map>
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "src/common/log_adapter.h"
#include "src/litert/pack_weight_manager.h"
#include "src/litert/kernel/cpu/fp16/fp16_op_handler.h"
#include "nnacl/base/cast_base.h"

namespace mindspore {
namespace lite {

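// Frees the tensors_ pointer arrays that GenerateInTensorC allocates for TensorListC inputs, then clears the vector.
// The TensorC objects themselves are not freed here; they are views obtained from the tensors via ConvertToTensorC().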
void FreeInTensorC(std::vector<TensorC *> *tensors_in, const std::shared_ptr<Allocator> &allocator) {
  if (tensors_in == nullptr) {
    return;
  }
  for (auto &i : *tensors_in) {
    if (i == nullptr) {
      continue;
    }
    if (i->data_type_ == kObjectTypeTensorType) {
      auto *tensorListC = reinterpret_cast<TensorListC *>(i);
      if (tensorListC->tensors_ != nullptr) {
        if (allocator != nullptr && !IS_RUNTIME_ALLOCATOR(allocator)) {
          allocator->Free(tensorListC->tensors_);
        } else {
          free(tensorListC->tensors_);
        }
        tensorListC->tensors_ = nullptr;
      }
    }
  }
  tensors_in->clear();
}

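// Frees the TensorListC payloads of output tensors: each element tensor's data_ buffer and the tensors_ array itself.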
void FreeOutTensorC(std::vector<TensorC *> *tensors_out, const std::shared_ptr<Allocator> &allocator) {
  if (tensors_out == nullptr) {
    return;
  }
  for (auto &i : *tensors_out) {
    if (i == nullptr) {
      continue;
    }
    if (i->data_type_ == static_cast<int>(kObjectTypeTensorType)) {
      auto *tensorListC = reinterpret_cast<TensorListC *>(i);
      if (tensorListC->tensors_ != nullptr) {
        for (size_t j = 0; j < tensorListC->element_num_; ++j) {
          if (tensorListC->tensors_[j] != nullptr && tensorListC->tensors_[j]->data_ != nullptr) {
            free(tensorListC->tensors_[j]->data_);
          }
        }
        free(tensorListC->tensors_);
        tensorListC->tensors_ = nullptr;
      }
    }
  }
  tensors_out->clear();
}

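// Copies the metadata of a lite::Tensor (format, data pointer, data type, shape) into an nnacl TensorC.
// Fails when the shape has more than MAX_SHAPE_SIZE dimensions.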
int Tensor2TensorC(const Tensor *src, TensorC *dst) {
  MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
  dst->format_ = static_cast<int>(src->format());
  dst->data_ = src->data();
  dst->data_type_ = src->data_type();
  dst->shape_size_ = src->shape().size();
  if (dst->shape_size_ > MAX_SHAPE_SIZE) {
    MS_LOG(ERROR) << "tensor shape size " << dst->shape_size_ << " is larger than max shape size " << MAX_SHAPE_SIZE;
    return RET_ERROR;
  }
  for (size_t i = 0; i < dst->shape_size_; i++) {
    dst->shape_[i] = src->shape().at(i);
  }
  return RET_OK;
}

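// Copies TensorC metadata back into a lite::Tensor. When the TensorC carries data, the tensor either already shares
// the buffer (it then takes ownership) or the data is memcpy'd into the tensor's own storage.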
int TensorC2Tensor(TensorC *src, Tensor *dst, std::shared_ptr<Allocator> allocator) {
  MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
  dst->set_format(static_cast<mindspore::Format>(src->format_));
  dst->set_data_type(static_cast<TypeId>(src->data_type_));  // get data during the runtime period
  dst->set_shape(std::vector<int>(src->shape_, src->shape_ + src->shape_size_));
  if (src->data_ != nullptr) {
    auto data = dst->MutableData();
    MS_CHECK_TRUE_RET(data != nullptr, RET_ERROR);
    if (data == src->data_) {  // tensor
      dst->set_own_data(true);
      dst->set_category(CONST_TENSOR);
      return RET_OK;
    }
    memcpy(data, src->data_, dst->Size());  // tensor_list
    dst->set_category(CONST_TENSOR);
  }
  return RET_OK;
}

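// Wraps graph outputs as TensorC pointers for shape inference. TensorList-producing ops are converted through
// TensorListC instead; when CONTROLFLOW_TENSORLIST_CLIP is defined that path is compiled out.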
int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &outputs,
                       std::vector<TensorC *> *out_tensor_c) {
  MS_CHECK_TRUE_RET(out_tensor_c != nullptr && parameter != nullptr, RET_ERROR);
  if (parameter->type_ == mindspore::schema::PrimitiveType_TensorListFromTensor ||
      parameter->type_ == mindspore::schema::PrimitiveType_TensorListReserve ||
      parameter->type_ == mindspore::schema::PrimitiveType_TensorListSetItem) {
#ifndef CONTROLFLOW_TENSORLIST_CLIP
    // TensorListC -> TensorC
    MS_CHECK_TRUE_RET(!outputs.empty() && outputs.front()->data_type() == TypeId::kObjectTypeTensorType, RET_ERROR);
    auto output = static_cast<TensorList *>(outputs[0]);
    TensorListC *tensor_list_c = output->ConvertToTensorListC();
    tensor_list_c->element_num_ = 0;
    out_tensor_c->push_back(reinterpret_cast<TensorC *>(tensor_list_c));
#else
    return RET_NOT_SUPPORT;
#endif
  } else {
    (void)std::transform(outputs.begin(), outputs.end(), std::back_inserter(*out_tensor_c),
                         [](lite::Tensor *output) { return output->ConvertToTensorC(); });
  }
  return RET_OK;
}

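// Wraps graph inputs as TensorC pointers for shape inference. For tensor list inputs this allocates a tensors_
// pointer array that must later be released with FreeInTensorC.
// Typical call pattern (a sketch; error handling elided):
//   std::vector<TensorC *> in_tensor_c;
//   if (GenerateInTensorC(inputs, &in_tensor_c, allocator) != RET_OK) { /* handle error */ }
//   ... run the nnacl infer function ...
//   FreeInTensorC(&in_tensor_c, allocator);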
int GenerateInTensorC(const std::vector<lite::Tensor *> &inputs, std::vector<TensorC *> *in_tensor_c,
                      const std::shared_ptr<Allocator> &allocator) {
  MS_CHECK_TRUE_RET(in_tensor_c != nullptr, RET_ERROR);
  int ret = RET_OK;
  for (auto input : inputs) {
    if (input->data_type() == kObjectTypeTensorType) {
#ifndef CONTROLFLOW_TENSORLIST_CLIP
      // Tensor -> TensorList -> TensorListC -> TensorC
      auto *tensor_list = reinterpret_cast<TensorList *>(input);
      TensorListC *tensor_list_c = tensor_list->ConvertToTensorListC();
      auto tensors = tensor_list->tensors();
      if (!tensors.empty()) {
        if (allocator != nullptr && !IS_RUNTIME_ALLOCATOR(allocator)) {
          tensor_list_c->tensors_ = reinterpret_cast<TensorC **>(allocator->Malloc(tensors.size() * sizeof(void *)));
        } else {
          tensor_list_c->tensors_ = reinterpret_cast<TensorC **>(malloc(tensors.size() * sizeof(void *)));
        }
        if (tensor_list_c->tensors_ == nullptr) {
          MS_LOG(ERROR) << "malloc tensors_ for TensorListC failed";
          return RET_NULL_PTR;
        }
        for (size_t i = 0; i < tensors.size(); ++i) {
          tensor_list_c->tensors_[i] = tensors[i]->ConvertToTensorC();
        }
      }
      in_tensor_c->push_back(reinterpret_cast<TensorC *>(tensor_list_c));
#else
      return RET_NOT_SUPPORT;
#endif
    } else {
      // Tensor -> TensorC
      TensorC *tensor_c = input->ConvertToTensorC();
      in_tensor_c->emplace_back(tensor_c);
    }
  }
  return ret;
}

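// Validates graph input tensors before execution: non-null pointers and data, no negative shape dimensions, and an
// NHWC or NCHW format.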
int CheckTensorsInvalid(const std::vector<Tensor *> &tensors) {
  for (auto tensor : tensors) {
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "Graph input tensor is nullptr";
      return RET_ERROR;
    }
    if (MS_UNLIKELY(tensor->data_type() != kObjectTypeTensorType && tensor->data() == nullptr)) {
      MS_LOG(ERROR) << "Graph input tensor data is nullptr " << tensor->tensor_name();
      return RET_ERROR;
    }
    const auto &shape = tensor->shape();
    bool valid = std::all_of(shape.begin(), shape.end(), [](int i) { return i >= 0; });
    if (MS_UNLIKELY(!valid)) {
      MS_LOG(ERROR) << "The shape of the tensor contains a negative dimension, "
                    << "check the model and assign the input shape with method Resize().";
      return RET_ERROR;
    }
    if (MS_UNLIKELY(tensor->format() != mindspore::NHWC && tensor->format() != mindspore::NCHW)) {
      MS_LOG(ERROR) << "model input's format may be changed, which should be NHWC or NCHW";
      return RET_FORMAT_ERR;
    }
    if (MS_UNLIKELY(tensor->data() == nullptr)) {
      MS_LOG(ERROR) << "tensor data should be filled before run op";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

std::string ShapeToString(const std::vector<int> &shape) {
  std::string result = "[";
  int max_size = 40;
  result.reserve(max_size);
  for (size_t i = 0; i < shape.size(); ++i) {
    result += std::to_string(shape[i]);
    if (i + 1 < shape.size()) {
      result += ", ";
    }
  }
  result += "]";
  return result;
}

int CheckGraphInputShapes(const std::vector<Tensor *> &inputs,
                          const std::unordered_map<Tensor *, std::vector<int>> &input_shape_map) {
  for (const auto input : inputs) {
    MS_CHECK_TRUE_MSG(input != nullptr, RET_ERROR, "graph input tensor is nullptr.");
    if (input_shape_map.find(input) == input_shape_map.end()) {
      MS_LOG(ERROR) << "can't find " << input->tensor_name() << " in input_shape_map";
      return RET_ERROR;
    }
    if (!input_shape_map.at(input).empty() && input_shape_map.at(input) != input->shape()) {
#if defined(ENABLE_LITE_ACL)
      MS_LOG(WARNING) << "Please check whether graph input " << input->tensor_name()
                      << " shape:" << ShapeToString(input->shape())
                      << " has been modified by the DVPP method to shape:" << ShapeToString(input_shape_map.at(input))
                      << ". If not, the modification is illegal; please modify the input shape with method Resize().";
#elif defined(ENABLE_LITE_DPICO)
      MS_LOG(WARNING) << "Please check whether graph input " << input->tensor_name()
                      << " shape:" << ShapeToString(input->shape())
                      << " has been modified by setting 'SupportZeroCopy=on' to shape:"
                      << ShapeToString(input_shape_map.at(input))
                      << ". If not, the modification is illegal; please modify the input shape with method Resize().";
#else
      MS_LOG(ERROR) << "graph input:" << input->tensor_name()
                    << " shape has been illegally modified, please modify the input shape with method Resize().";
      return RET_ERROR;
#endif
    }
  }
  return RET_OK;
}

std::vector<mindspore::MSTensor> LiteTensorsToMSTensors(const std::vector<lite::Tensor *> &lite_tensors) {
  std::vector<mindspore::MSTensor> tensors;
  (void)std::transform(
    lite_tensors.begin(), lite_tensors.end(), std::back_inserter(tensors),
    [](lite::Tensor *tensor) { return mindspore::MSTensor(std::make_shared<LiteTensorImpl>(tensor)); });
  return tensors;
}

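// Moves the data buffer of src_tensor into dst_tensor without copying. Ownership follows the source unless the
// source is const, and the source's reference count is decremented at the end.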
int MoveCommonTensorData(Tensor *dst_tensor, Tensor *src_tensor) {
  MS_ASSERT(src_tensor != dst_tensor);
  if (src_tensor->data() == dst_tensor->data()) {
    MS_LOG(DEBUG) << "no need to move data.";
    return RET_OK;
  }
  dst_tensor->FreeData();
  dst_tensor->ResetRefCount();
  dst_tensor->set_allocator(src_tensor->allocator());

  if (src_tensor->data() != nullptr) {
    dst_tensor->set_data(src_tensor->MutableData()); /* using MutableData to sync GPU data */
  }

  if (src_tensor->data() == dst_tensor->data() && src_tensor->IsConst()) {
    dst_tensor->set_own_data(false);
  } else {
    dst_tensor->set_own_data(src_tensor->own_data());
  }
  src_tensor->DecRefCount();
  return RET_OK;
}

int MoveTensorData(Tensor *dst_tensor, Tensor *src_tensor) {
  if (src_tensor == dst_tensor) {
    MS_LOG(INFO) << "no need to move.";
    return RET_OK;
  }
  MS_ASSERT(src_tensor->allocator() != nullptr);
  auto ret = RET_OK;
  if (src_tensor->data_type() == kObjectTypeTensorType) {
    ret =
      MoveTensorListTensorData(reinterpret_cast<TensorList *>(dst_tensor), reinterpret_cast<TensorList *>(src_tensor));
  } else {
    ret = MoveCommonTensorData(dst_tensor, src_tensor);
  }
  return ret;
}

void SetCommonTensorData(Tensor *dst_tensor, Tensor *src_tensor) {
  dst_tensor->set_data(src_tensor->data());
  dst_tensor->set_own_data(false);
}

int SetTensorData(Tensor *dst_tensor, Tensor *src_tensor) {
  auto ret = RET_OK;
  if (src_tensor->data_type() == kObjectTypeTensorType) {
    ret =
      SetTensorListTensorData(reinterpret_cast<TensorList *>(dst_tensor), reinterpret_cast<TensorList *>(src_tensor));
  } else {
    SetCommonTensorData(dst_tensor, src_tensor);
  }
  return ret;
}

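// Casts src into dst, dispatching to the tensor list variant for kObjectTypeTensorType tensors, and releases one
// reference on src afterwards.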
int CastTensorData(Tensor *dst, Tensor *src, bool support_fp16) {
  int ret = RET_OK;
  if (src->data_type() != kObjectTypeTensorType) {
    ret = CastCommonTensorData(dst, src, support_fp16);
  } else {
    ret =
      CastTensorListTensorData(reinterpret_cast<TensorList *>(dst), reinterpret_cast<TensorList *>(src), support_fp16);
  }
  src->DecRefCount();
  return ret;
}

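// Converts src's data into dst's data type. Supported conversions: fp16 <-> fp32 and int32 <-> fp32; the shapes of
// the two tensors must match.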
int CastCommonTensorData(Tensor *dst, Tensor *src, bool support_fp16) {
  auto dst_data = dst->ReallocData(); /* using ReallocData to sync GPU data */
  if (dst_data == nullptr) {
    MS_LOG(ERROR) << "Remalloc memory failed.";
    return RET_NULL_PTR;
  }
  dst->ResetRefCount();
  if (dst->shape() != src->shape()) {
    MS_LOG(ERROR) << "dst tensor: " << dst->tensor_name() << " shape: " << dst->shape() << " vs "
                  << "src tensor: " << src->tensor_name() << " shape: " << src->shape();
    return RET_PARAM_INVALID;
  }
  auto src_data = src->MutableData();
  auto src_elements_num = static_cast<size_t>(src->ElementsNum());
  auto dst_data_type = dst->data_type();
  auto src_data_type = src->data_type();
  // In some cases the dst data type is unknown and defaults to float32; NeedCastData() then reports that a cast is
  // needed even though no conversion actually has to happen.
  if (dst_data_type == src_data_type) {
    memcpy(dst_data, src_data, src->Size());  // copy the byte size, not the element count
    return RET_OK;
  }
  if (dst_data_type == kNumberTypeFloat32 && src_data_type == kNumberTypeFloat16) {
    // the handler degrades to a software conversion where fp16 hardware support is unavailable
    // (mixed kernels support fp16 on GPU/Ascend)
    Float16ToFloat32_fp16_handler(src_data, dst_data, src_elements_num, support_fp16);
  } else if (dst_data_type == kNumberTypeFloat16 && src_data_type == kNumberTypeFloat32) {
    Float32ToFloat16_fp16_handler(src_data, dst_data, src_elements_num, support_fp16);
  } else if (dst_data_type == kNumberTypeFloat32 && src_data_type == kNumberTypeInt32) {
    Int32ToFloat32(static_cast<const int32_t *>(src_data), static_cast<float *>(dst_data),
                   static_cast<int>(src_elements_num));
  } else if (dst_data_type == kNumberTypeInt32 && src_data_type == kNumberTypeFloat32) {
    Float32ToInt32(static_cast<const float *>(src_data), static_cast<int32_t *>(dst_data),
                   static_cast<int>(src_elements_num));
  } else {
    MS_LOG(ERROR) << "not support dst_data_type: " << dst_data_type << " src_data_type: " << src_data_type;
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

bool NeedCastData(Tensor *dst_tensor, Tensor *src_tensor) {
  if (IsUnKnownDtype(dst_tensor) || IsUnKnownDtype(src_tensor)) {
    MS_LOG(INFO) << "Type unknown, no need cast.";
    return false;
  }
  return !IsSameDtype(dst_tensor, src_tensor);
}

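// Casts a const tensor between fp32 and fp16 in place: a new buffer is allocated for the target type, the data is
// converted into it, and the original buffer is released if owned. For fp32 targets the converted buffer may be
// shared through PackWeightManager::ReplaceFp16Data.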
// support_fp16: current device and package support float16
int CastConstTensorData(Tensor *tensor, TypeId dst_data_type, bool support_fp16) {
  MS_CHECK_TRUE_RET(tensor != nullptr, RET_NULL_PTR);
  if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) {
    return RET_OK;
  }
  if (dst_data_type != kNumberTypeFloat32 && dst_data_type != kNumberTypeFloat16) {
    return RET_OK;
  }
  MS_CHECK_TRUE_RET(tensor->IsConst(), RET_ERROR);
  if (tensor->data_type() == dst_data_type) {
    return RET_OK;
  }
  auto origin_own_data = tensor->own_data();
  auto origin_dt = tensor->data_type();
  auto origin_data = tensor->data();
  MS_CHECK_TRUE_RET(origin_data != nullptr, RET_ERROR);
  tensor->set_data(nullptr);
  tensor->set_data_type(dst_data_type);
  auto ret = tensor->MallocData();
  if (RET_OK != ret) {
    MS_LOG(ERROR) << "malloc data failed";
    // reset tensor
    tensor->set_data(origin_data);
    tensor->set_data_type(origin_dt);
    tensor->set_own_data(origin_own_data);
    return ret;
  }
  auto new_tensor_data = tensor->data();
  MS_ASSERT(new_tensor_data != nullptr);
  if (dst_data_type == kNumberTypeFloat32) {
    bool replace = false;
    void *data = lite::PackWeightManager::GetInstance()->ReplaceFp16Data(origin_data, tensor->Size(), &replace);
    if (replace) {
      if (data == nullptr) {
        MS_LOG(ERROR) << "replace fp16 data failed.";
        return RET_ERROR;
      }
      if (tensor->allocator() == nullptr) {
        free(new_tensor_data);
      } else {
        tensor->allocator()->Free(new_tensor_data);
      }
      tensor->set_data(data);
      tensor->set_own_data(false);
    } else {
      data = new_tensor_data;
    }
    Float16ToFloat32_fp16_handler(origin_data, data, tensor->ElementsNum(), support_fp16);
  } else {  // dst_data_type == kNumberTypeFloat16
    Float32ToFloat16_fp16_handler(origin_data, new_tensor_data, tensor->ElementsNum(), support_fp16);
  }
  if (origin_own_data) {
    if (tensor->allocator() == nullptr) {
      free(origin_data);
    } else {
      tensor->allocator()->Free(origin_data);
    }
  }
  return RET_OK;
}

#ifndef CONTROLFLOW_TENSORLIST_CLIP

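// Propagates shape and format from src to dst. Both tensors must be plain tensors or both tensor lists; for tensor
// lists the element shape and the real element count (tensors().size()) are propagated instead.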
int SetTensorShape(Tensor *dst, Tensor *src) {
  if (dst->data_type() != kObjectTypeTensorType && src->data_type() != kObjectTypeTensorType) {
    dst->set_shape(src->shape());
    dst->set_format(src->format());
    return RET_OK;
  } else if (dst->data_type() == kObjectTypeTensorType && src->data_type() == kObjectTypeTensorType) {
    auto input_tensorlist = reinterpret_cast<TensorList *>(dst);
    auto input_data_tensorlist = reinterpret_cast<TensorList *>(src);
    MS_CHECK_FALSE_MSG(input_tensorlist == nullptr, RET_ERROR, "cast to tensorlist failed.");
    MS_CHECK_FALSE_MSG(input_data_tensorlist == nullptr, RET_ERROR, "cast to tensorlist failed.");
    input_tensorlist->set_element_shape(input_data_tensorlist->element_shape());
    // The shape recorded in some models differs from tensors().size(); the real shape is tensors().size().
    int real_shape_val = static_cast<int>(input_data_tensorlist->tensors().size());
    std::vector<int> real_shape{real_shape_val};
    input_tensorlist->set_shape(real_shape);
    // workaround for some models: propagate the tensors data type when the destination's is still unknown
    if (input_data_tensorlist->tensors_data_type() != kTypeUnknown &&
        input_tensorlist->tensors_data_type() == kTypeUnknown) {
      input_tensorlist->set_tensors_data_type(input_data_tensorlist->tensors_data_type());
    }
    return RET_OK;
  } else {
    MS_LOG(ERROR) << "not able to set tensor shape between a tensor and a tensorlist.";
    return RET_ERROR;
  }
}

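// Casts every element tensor of src_tensorlist into dst_tensorlist (fp16 <-> fp32), reallocating the destination
// element buffers when the source data type requires it.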
int CastTensorListTensorData(TensorList *dst_tensorlist, TensorList *src_tensorlist, bool support_fp16) {
  MS_ASSERT(src_tensorlist != nullptr);
  MS_ASSERT(dst_tensorlist != nullptr);
  dst_tensorlist->set_shape(src_tensorlist->shape());
  std::vector<std::vector<int>> tensors_shapes{};
  tensors_shapes.resize(src_tensorlist->tensors().size());
  for (size_t i = 0; i < tensors_shapes.size(); ++i) {
    tensors_shapes[i] = src_tensorlist->tensors()[i]->shape();
  }
  if (!dst_tensorlist->shape().empty()) {
    if (src_tensorlist->tensors_data_type() == kNumberTypeFloat16) {
      auto ret = dst_tensorlist->MallocTensorListData(kNumberTypeFloat32, tensors_shapes);
      MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "dst_tensorlist MallocTensorListData failed.");
    }
    if (src_tensorlist->tensors_data_type() == kNumberTypeFloat32) {
      auto ret = dst_tensorlist->MallocTensorListData(kNumberTypeFloat16, tensors_shapes);
      MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "dst_tensorlist MallocTensorListData failed.");
    }
  }
  dst_tensorlist->set_allocator(src_tensorlist->allocator());
  dst_tensorlist->ResetRefCount();

  for (size_t i = 0; i < src_tensorlist->tensors().size(); ++i) {
    auto src_tensor = src_tensorlist->tensors()[i];
    auto dst_tensor = dst_tensorlist->tensors()[i];
    auto ret = CastCommonTensorData(dst_tensor, src_tensor, support_fp16);
    MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "cast tensor data failed.");
  }
  return RET_OK;
}

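// Moves the element buffers of src_tensorlist into dst_tensorlist tensor by tensor; both lists must contain the same
// number of tensors.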
int MoveTensorListTensorData(TensorList *dst_tensorlist, TensorList *src_tensorlist) {
  MS_ASSERT(src_tensorlist != nullptr);
  MS_ASSERT(dst_tensorlist != nullptr);
  dst_tensorlist->FreeData();
  dst_tensorlist->ResetRefCount();
  dst_tensorlist->set_allocator(src_tensorlist->allocator());

  auto src_tensorlist_tensors_size = src_tensorlist->tensors().size();
  auto dst_tensorlist_tensors_size = dst_tensorlist->tensors().size();
  if (src_tensorlist_tensors_size != dst_tensorlist_tensors_size) {
    MS_LOG(ERROR) << "src tensorlist: " << src_tensorlist->tensor_name()
                  << " tensors size: " << src_tensorlist_tensors_size
                  << " vs dst tensorlist: " << dst_tensorlist->tensor_name()
                  << " tensors size: " << dst_tensorlist_tensors_size;
    return RET_ERROR;
  }

  // workaround for some models: keep the tensors data type in sync
  dst_tensorlist->set_tensors_data_type(src_tensorlist->tensors_data_type());

  dst_tensorlist->set_own_data(src_tensorlist->own_data());
  for (size_t i = 0; i < src_tensorlist_tensors_size; ++i) {
    auto src_tensor = src_tensorlist->tensors()[i];
    auto dst_tensor = dst_tensorlist->tensors()[i];

    if (src_tensor->data() != nullptr) {
      dst_tensor->set_data(src_tensor->MutableData()); /* using MutableData to sync GPU data */
    }
    if (src_tensor->data() == dst_tensor->data() && src_tensor->IsConst()) {
      dst_tensor->set_own_data(false);
    } else {
      dst_tensor->set_own_data(src_tensor->own_data());
    }
    dst_tensor->set_shape(src_tensor->shape());
  }

  if (src_tensorlist->IsConst() || src_tensorlist->IsGraphInput()) {
    dst_tensorlist->set_own_data(false);
  } else {
    src_tensorlist->DecRefCount();
  }
  return RET_OK;
}

int SetTensorListTensorData(TensorList *dst_tensor_list, TensorList *src_tensor_list) {
  auto ret = dst_tensor_list->FreeTensorListData();
  MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "FreeTensorListData failed.");
  dst_tensor_list->set_own_data(false);
  dst_tensor_list->set_tensors(src_tensor_list->tensors());
  dst_tensor_list->set_tensors_data_type(src_tensor_list->tensors_data_type());
  dst_tensor_list->set_element_shape(src_tensor_list->element_shape());
  return RET_OK;
}

int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
  MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
  dst->set_data_type(static_cast<TypeId>(src->data_type_));
  dst->set_format(static_cast<mindspore::Format>(src->format_));
  dst->set_shape(std::vector<int>(1, src->element_num_));

  // Set Tensors
  for (size_t i = 0; i < src->element_num_; i++) {
    auto ret = TensorC2Tensor(src->tensors_[i], dst->GetTensor(static_cast<int>(i)));
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "TensorC2Tensor failed";
      return ret;
    }
  }

  return RET_OK;
}

TypeId TensorListDataType(Tensor *tensor) {
  auto tensor_list = reinterpret_cast<TensorList *>(tensor);
  auto tensor_list_dtype = tensor_list->tensors_data_type();
  if (tensor_list_dtype == kNumberTypeFloat32 || tensor_list_dtype == kNumberTypeFloat16 ||
      tensor_list_dtype == kNumberTypeInt8 || tensor_list_dtype == kNumberTypeInt32 ||
      tensor_list_dtype == kNumberTypeBool) {
    return tensor_list_dtype;
  }
  // if not found, return float32 as default.
  return kNumberTypeFloat32;
}

TensorList *MallocTensorListDataAccordingToTensorListC(Tensor *tensor, TensorListC *tensor_list_c) {
  auto *tensor_list = reinterpret_cast<TensorList *>(tensor);
  tensor_list->set_shape({static_cast<int>(tensor_list_c->element_num_)});
  auto tensor_shape = std::vector<std::vector<int>>(
    tensor_list_c->element_num_, std::vector<int>(tensor_list_c->element_shape_,
                                                  tensor_list_c->element_shape_ + tensor_list_c->element_shape_size_));
  auto ret = tensor_list->MallocTensorListData(static_cast<TypeId>(tensor_list_c->tensors_data_type_), tensor_shape);
  MS_CHECK_FALSE_MSG(ret != RET_OK, nullptr, "tensor list MallocTensorListData failed.");
  return tensor_list;
}

int DecodeTensorLsit(Tensor *tensor, const int *src_data, size_t length) {
  auto tensor_list = reinterpret_cast<TensorList *>(tensor);
  if (tensor_list->Decode(src_data, length) != RET_OK) {
    MS_LOG(ERROR) << "Decode tensorlist data failed";
    return RET_ERROR;
  }
  return RET_OK;
}

Tensor *CreateTensorList(const std::vector<int> &shape, const Category &src_category, const void *src_data) {
  auto *tensor_list = new (std::nothrow) TensorList(shape, std::vector<int>(), src_category);
  MS_CHECK_TRUE_RET(tensor_list != nullptr, nullptr);
  // the first int of the encoded data carries the tensor list's element data type
  if (src_data != nullptr) {
    auto tensor_data_type = TypeId(reinterpret_cast<const int *>(src_data)[0]);
    tensor_list->set_tensors_data_type(tensor_data_type);
  }
  return tensor_list;
}

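// Reconciles the tensors_data_type of the two tensor lists: when exactly one side is still kTypeUnknown it is
// updated from the other, while two known but different types are reported as incompatible.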
int CopyTensorListTensorDataType(TensorList *dst_tensorlist, TensorList *src_tensorlist) {
  // the shape may change because tensors().size() can change in RunGraph
  if (dst_tensorlist->data_type() != src_tensorlist->data_type() ||
      dst_tensorlist->format() != src_tensorlist->format()) {
    MS_LOG(ERROR) << "input tensorlist and output tensorlist data_type or format is incompatible";
    MS_LOG(ERROR) << "input tensor data_type: " << src_tensorlist->data_type() << " vs "
                  << "output tensor data_type: " << dst_tensorlist->data_type()
                  << ", input tensor format: " << src_tensorlist->format() << " vs "
                  << "output tensor format: " << dst_tensorlist->format();
    return RET_ERROR;
  }
  // once the tensorlist malloc is done, element_shape compatibility needs to be checked
  dst_tensorlist->set_element_shape(src_tensorlist->element_shape());

  auto update_data_type = kTypeUnknown;
  auto dst_tensor_data_type = dst_tensorlist->tensors_data_type();
  auto src_tensor_data_type = src_tensorlist->tensors_data_type();
  if (dst_tensor_data_type != src_tensor_data_type) {
    if (src_tensor_data_type != kTypeUnknown && dst_tensor_data_type != kTypeUnknown) {
      MS_LOG(ERROR) << "input tensorlist and output tensorlist are incompatible";
      return RET_ERROR;
    }
    update_data_type = dst_tensor_data_type != kTypeUnknown ? dst_tensor_data_type : src_tensor_data_type;
  }
  if (update_data_type != kTypeUnknown) {
    src_tensorlist->set_tensors_data_type(update_data_type);
    dst_tensorlist->set_tensors_data_type(update_data_type);
  }
  return RET_OK;
}

void SetTensorListTensorDataType(const TypeId &data_type, Tensor *tensor) {
  if (tensor->data_type() == kObjectTypeTensorType) {
    auto old_tensorlist = reinterpret_cast<TensorList *>(tensor);
    if (old_tensorlist->tensors_data_type() == kNumberTypeFloat16 ||
        old_tensorlist->tensors_data_type() == kNumberTypeFloat32) {
      old_tensorlist->set_tensors_data_type(data_type);
    }
  }
}

bool IsSameDtype(const Tensor *input_1, const Tensor *input_2) {
  if (input_1->data_type() != kObjectTypeTensorType && input_2->data_type() != kObjectTypeTensorType) {
    return input_1->data_type() == input_2->data_type();
  } else if (input_1->data_type() == kObjectTypeTensorType && input_2->data_type() == kObjectTypeTensorType) {
    auto input_tensor_list_1 = reinterpret_cast<const TensorList *>(input_1);
    auto input_tensor_list_2 = reinterpret_cast<const TensorList *>(input_2);
    return input_tensor_list_1->tensors_data_type() == input_tensor_list_2->tensors_data_type();
  } else {
    return false;
  }
}

bool IsSameShape(const Tensor *input_1, const Tensor *input_2) {
  if (input_1->data_type() != kObjectTypeTensorType && input_2->data_type() != kObjectTypeTensorType) {
    return input_1->shape() == input_2->shape();
  } else if (input_1->data_type() == kObjectTypeTensorType && input_2->data_type() == kObjectTypeTensorType) {
    auto input_tensor_list_1 = reinterpret_cast<const TensorList *>(input_1);
    auto input_tensor_list_2 = reinterpret_cast<const TensorList *>(input_2);
    return input_tensor_list_1->shape() == input_tensor_list_2->shape() &&
           input_tensor_list_1->element_shape() == input_tensor_list_2->element_shape();
  } else {
    return false;
  }
}

int MallocTensorData(Tensor *tensor) {
  auto ret = RET_OK;
  if (tensor->data_type() != kObjectTypeTensorType) {
    tensor->FreeData();
    auto size = tensor->ElementsNum();
    if (size <= 0) {
      return RET_OK;
    }
    ret = tensor->MallocData();
  } else {
    auto tensor_list = reinterpret_cast<TensorList *>(tensor);
    ret = tensor_list->FreeTensorListData();
    MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "free tensor list data failed.");
    auto size = tensor->ElementsNum();
    if (size <= 0) {
      return RET_OK;
    }
    std::vector<std::vector<int>> tensors_shape{};
    for (int i = 0; i < size; ++i) {
      tensors_shape.push_back(tensor_list->element_shape());
    }
    ret = tensor_list->MallocTensorListData(tensor_list->tensors_data_type(), tensors_shape);
    MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "malloc tensor list data failed.");
  }
  return ret;
}

bool IsUnKnownDtype(const Tensor *input) {
  if (input->data_type() == kTypeUnknown) {
    return true;
  } else if (input->data_type() == kObjectTypeTensorType) {
    auto input_tensor_list = reinterpret_cast<const TensorList *>(input);
    return input_tensor_list->tensors_data_type() == kTypeUnknown;
  }
  return false;
}

#else

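// Stub implementations for builds with CONTROLFLOW_TENSORLIST_CLIP defined: TensorList support is clipped out, so
// every TensorList operation logs an error and fails.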
int SetTensorShape(Tensor *dst, Tensor *src) {
  if (dst->data_type() != kObjectTypeTensorType && src->data_type() != kObjectTypeTensorType) {
    dst->set_shape(src->shape());
    dst->set_format(src->format());
    return RET_OK;
  } else {
    MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
    return RET_ERROR;
  }
}

int CastTensorListTensorData(TensorList *dst_tensorlist, TensorList *src_tensorlist, bool support_fp16) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

int MoveTensorListTensorData(TensorList *dst_tensorlist, TensorList *src_tensorlist) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

int SetTensorListTensorData(TensorList *dst_tensor_list, TensorList *src_tensor_list) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

void FreeTensorListC(TensorListC *tensorlist_c, std::shared_ptr<Allocator> allocator) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
}

int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

TypeId TensorListDataType(Tensor *tensor) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return kTypeUnknown;
}

TensorList *MallocTensorListDataAccordingToTensorListC(Tensor *tensor, TensorListC *tensor_list_c) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return nullptr;
}

int DecodeTensorLsit(Tensor *tensor, const int *src_data, size_t length) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

Tensor *CreateTensorList(const std::vector<int> &shape, const Category &src_category, const void *src_data) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return nullptr;
}

int CopyTensorListTensorDataType(TensorList *dst_tensorlist, TensorList *src_tensorlist) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
}

void SetTensorListTensorDataType(const TypeId &data_type, Tensor *tensor) {
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
}

bool IsSameDtype(const Tensor *input_1, const Tensor *input_2) {
  if (input_1->data_type() != kObjectTypeTensorType && input_2->data_type() != kObjectTypeTensorType) {
    return input_1->data_type() == input_2->data_type();
  } else {
    MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
    return false;
  }
}

bool IsSameShape(const Tensor *input_1, const Tensor *input_2) {
  if (input_1->data_type() != kObjectTypeTensorType && input_2->data_type() != kObjectTypeTensorType) {
    return input_1->shape() == input_2->shape();
  } else {
    MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
    return false;
  }
}

int MallocTensorData(Tensor *tensor) {
  auto ret = RET_OK;
  if (tensor->data_type() != kObjectTypeTensorType) {
    tensor->FreeData();
    auto size = tensor->ElementsNum();
    if (size <= 0) {
      return RET_OK;
    }
    ret = tensor->MallocData();
  } else {
    MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
    return RET_ERROR;
  }
  return ret;
}

bool IsUnKnownDtype(const Tensor *input) { return input->data_type() == kTypeUnknown; }

#endif
}  // namespace lite
}  // namespace mindspore