/**
 * Copyright 2020-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tensorlist.h"
#include <utility>
#include <algorithm>
#include "src/common/log_adapter.h"
#include "src/tensor.h"
#include "nnacl/op_base.h"

namespace mindspore::lite {
#ifndef CONTROLFLOW_TENSORLIST_CLIP
namespace {
constexpr int kOffset = 2;
}  // namespace
TensorList::TensorList(std::vector<int> shape, std::vector<int> element_shape, Category category)
    : Tensor(kObjectTypeTensorType, std::move(shape), mindspore::NHWC, category) {
  tensor_list_c_ = {false, kObjectTypeTensorType, Format_NHWC, 0, kTypeUnknown, -1, nullptr, 0, element_shape.size()};
  if (element_shape.size() > MAX_SHAPE_SIZE) {
    tensor_list_c_.element_shape_size_ = 0;
    MS_LOG(WARNING) << "The element_shape size has exceeded the limit 8, now is " << element_shape.size();
    return;
  }
  for (size_t i = 0; i < element_shape.size(); ++i) {
    tensor_list_c_.element_shape_[i] = element_shape[i];
  }
}

TensorList::~TensorList() {
  if (!this->tensors_.empty()) {
    this->TensorList::FreeData();
    this->FreeTensorListData();
  }
}

void TensorList::FreeData() {
  if (this->IsConst() || this->IsGraphInput()) {
    return;
  }
  // free the data buffer of each tensor in tensors_
  for (auto tensor : tensors_) {
    if (tensor == nullptr) {
      continue;
    }
    tensor->FreeData();
  }
}

int TensorList::FreeTensorListData() {
  // delete each tensor in tensors_ and clear tensors_
  if (this->tensors_.empty()) {
    return RET_OK;
  }
  for (auto &tensor : this->tensors_) {
    if (tensor != nullptr) {
      delete tensor;
      tensor = nullptr;
    }
  }
  tensors_.clear();
  return RET_OK;
}
int TensorList::MallocTensorListData(TypeId dtype, const std::vector<std::vector<int> > &tensor_shape) {
  // This function rebuilds tensors_: each new tensor takes its shape from tensor_shape (param2) and its data type
  // from dtype (param1, stored as tensors_data_type_). It does not allocate data buffers; call MallocData
  // afterwards to malloc the data buffer of each tensor in tensors_.
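  // A minimal usage sketch (illustrative only; `tensor_list`, the shapes and the data type below are assumptions,
  // not part of this file): the list itself must be one-dimensional with ElementsNum() == shapes.size().
  //
  //   std::vector<std::vector<int>> shapes = {{2, 3}, {2, 3}};  // one shape per element tensor
  //   if (tensor_list->MallocTensorListData(kNumberTypeFloat32, shapes) == RET_OK) {
  //     (void)tensor_list->MallocData(nullptr);                 // then allocate each element's data buffer
  //   }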
  if (!this->tensors_.empty()) {
    // If tensors_ is not empty, clear it and rebuild a new tensors_.
    auto ret = FreeTensorListData();
    if (ret != RET_OK) {
      return RET_ERROR;
    }
  }
  if (this->shape().empty()) {
    MS_LOG(INFO) << "tensorlist has no elements, no need to malloc data.";
    return RET_OK;
  }
  if (this->shape().size() != 1) {
    MS_LOG(ERROR) << "tensorlist shape size:" << this->shape().size() << ", tensorlist must be one-dimensional";
    return RET_ERROR;
  }
  if (tensor_shape.empty()) {
    MS_LOG(INFO) << "tensor_shape is empty, no need to malloc tensor list data";
    return RET_OK;
  }
  if (static_cast<size_t>(this->ElementsNum()) != tensor_shape.size()) {
    MS_LOG(ERROR) << "tensorlist ElementsNum():" << this->ElementsNum()
                  << " must be equal to param2:tensor_shape.size():" << tensor_shape.size();
    return RET_ERROR;
  }
  this->tensor_list_c_.tensors_data_type_ = dtype;
  for (int i = 0; i < this->ElementsNum(); ++i) {
    auto tensor_ptr = new (std::nothrow) Tensor(dtype, tensor_shape[i]);
    if (tensor_ptr == nullptr) {
      MS_LOG(ERROR) << "new tensors_[" << i << "] failed!";
      return RET_ERROR;
    }
    if (this->allocator() != nullptr) {
      tensor_ptr->set_allocator(this->allocator());
    }
    tensor_ptr->set_init_ref_count(this->init_ref_count());
    tensor_ptr->set_ref_count(this->ref_count());
    this->tensors_.push_back(tensor_ptr);
  }
  return RET_OK;
}

int TensorList::MallocData(const AllocatorPtr allocator) {
  if (allocator != nullptr) {
    allocator_ = allocator;
  }
  // malloc the data buffer of each tensor in tensors_
  for (int i = 0; i < this->ElementsNum(); ++i) {
    if (tensors_.empty()) {
      return RET_OK;
    }
    auto tensor_ptr = this->tensors_[i];
    if (tensor_ptr == nullptr) {
      MS_LOG(ERROR) << "tensors_[" << i << "] is nullptr!";
      return RET_ERROR;
    }
    // if data_type() is kTypeUnknown, the data buffer will not be malloced
    if (tensor_ptr->data_type() != kTypeUnknown) {
      auto ret = tensor_ptr->MallocData(this->allocator_);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "tensorlist malloc tensors_[" << i << "] failed!";
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}

int TensorList::SetTensor(int index, const Tensor *src_tensor) {
  MS_CHECK_TRUE_MSG(src_tensor != nullptr, RET_ERROR, "src tensor cannot be null");
  // use this function to replace the value of tensors_[index] with a copy of src_tensor
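  // Illustrative call (the names below are assumptions, not part of this file): replace element 0 with a copy of
  // `src`, which must already hold the list's tensors_data_type_.
  //
  //   if (tensor_list->SetTensor(0, &src) != RET_OK) { /* handle the error */ }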
  if (src_tensor->data_type() != this->tensor_list_c_.tensors_data_type_) {
    MS_LOG(ERROR) << "src_tensor->data_type():" << src_tensor->data_type()
                  << " must be equal to tensors_data_type_:" << this->tensor_list_c_.tensors_data_type_;
    return RET_ERROR;
  }
  auto element_num = this->ElementsNum();
  MS_CHECK_GE(element_num, 0, RET_ERROR);
  if (index < 0 || index > (element_num - 1)) {
    MS_LOG(ERROR) << "index:" << index << " must be in [0, " << element_num - 1 << "]!";
    return RET_ERROR;
  }
  auto dst_tensor = this->tensors_[index];
  // free the original tensor and its data
  delete dst_tensor;
  this->tensors_[index] = Tensor::CopyTensor(*src_tensor);
  if (this->tensors_[index] == nullptr) {
    MS_LOG(ERROR) << "SetTensor: new tensor failed!";
    return RET_ERROR;
  }
  return RET_OK;
}

int TensorList::CheckTensorListParam() {
  for (int i = 0; i < this->ElementsNum(); ++i) {
    // each tensor in the tensorlist must not be nullptr
    if (this->tensors_[i] == nullptr) {
      MS_LOG(ERROR) << "CheckTensorListParam: tensors_[" << i << "] is nullptr";
      return RET_ERROR;
    }
    if (this->tensors_[i]->data_type() != this->tensor_list_c_.tensors_data_type_) {
      MS_LOG(ERROR) << "CheckTensorListParam: tensors_[" << i << "] data_type:" << this->tensors_[i]->data_type()
                    << " is not equal to tensors_data_type_:" << this->tensor_list_c_.tensors_data_type_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}

Tensor *TensorList::GetTensor(int index) {
  // return the pointer to tensors_[index]; the caller may modify the tensor through it
  if (index < 0 || index >= static_cast<int>(this->tensors_.size())) {
    MS_LOG(ERROR) << "index:" << index << " must be in [0, " << this->ElementsNum() - 1 << "]!";
    return nullptr;
  }
  return this->tensors_[index];
}

bool TensorList::IsCompatibleShape(const std::vector<int> &shape) {
  if (this->tensors_.empty() && this->tensor_list_c_.element_shape_size_ == 0) {
    return true;
  }
  if (shape.size() != this->tensor_list_c_.element_shape_size_) {
    return false;
  }
  for (size_t i = 0; i < shape.size(); ++i) {
    if (this->tensor_list_c_.element_shape_[i] >= 0 && shape[i] >= 0 &&
        this->tensor_list_c_.element_shape_[i] != shape[i]) {
      return false;
    }
  }
  return true;
}

bool TensorList::IsCompatibleShape(const Tensor *src) {
  MS_CHECK_TRUE_MSG(src != nullptr, false, "src tensor cannot be null");
  // the candidate element shape is stored in src as a one-dimensional int tensor
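  // Illustrative example (the values are assumptions): if this list's element_shape_ is {-1, 3}, an int tensor
  // holding {4, 3} is compatible because the -1 dimension matches any size, while one holding {4, 5} is not.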
  if (static_cast<size_t>(src->ElementsNum()) != this->tensor_list_c_.element_shape_size_) {
    return false;
  }
  if (src->data_type() != kNumberTypeInt && src->data_type() != kNumberTypeInt32) {
    MS_LOG(ERROR) << "src tensor data_type:" << src->data_type() << " is not int";
    return false;
  }
  auto src_ptr = reinterpret_cast<int *>(src->data());
  for (size_t i = 0; i < this->tensor_list_c_.element_shape_size_; ++i) {
    if (this->tensor_list_c_.element_shape_[i] >= 0 && src_ptr[i] >= 0 &&
        this->tensor_list_c_.element_shape_[i] != src_ptr[i]) {
      return false;
    }
  }
  return true;
}

STATUS TensorList::Decode(const int *data, size_t length) {
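  // Expected layout of `data`, as inferred from the parsing below:
  //   data[0]                     element data type (TypeId)
  //   data[1]                     element shape size, in [0, MAX_SHAPE_SIZE]
  //   data[2] .. data[1 + data[1]]  the element shape values
  //   data[2 + data[1]]           number of tensors (may be negative when shape inference is still needed)
  //   then, for each tensor:      its dims count followed by its dims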
  if (data == nullptr) {
    MS_LOG(ERROR) << "data is nullptr";
    return RET_ERROR;
  }
  MS_CHECK_LT(1, length, RET_ERROR);
  tensor_list_c_.tensors_data_type_ = TypeId(data[0]);
  if (tensor_list_c_.tensors_data_type_ < kTypeUnknown || tensor_list_c_.tensors_data_type_ > kMonadTypeEnd) {
    MS_LOG(ERROR) << "TypeId is illegal.";
    return RET_ERROR;
  }
  if (data[1] < 0 || data[1] > MAX_SHAPE_SIZE) {
    MS_LOG(WARNING) << "The shape-size must be in [0, 8], now is " << data[1];
    return RET_ERROR;
  }
  tensor_list_c_.element_shape_size_ = data[1];
  MS_CHECK_LT(static_cast<size_t>(data[1] + kOffset), length, RET_ERROR);
  for (int j = 0; j < data[1]; ++j) {
    tensor_list_c_.element_shape_[j] = data[kOffset + j];
  }
  int tensors_num = data[kOffset + data[1]];
  if (tensors_num < 0) {
    MS_LOG(WARNING) << "unable to create tensors, shape inference is needed.";
    return RET_OK;
  }

  if (this->ElementsNum() != tensors_num) {
    MS_LOG(WARNING) << "Input tensorlist data is invalid: shape size(" << this->ElementsNum() << ") != tensors_num("
                    << tensors_num << ").";
    MS_LOG(WARNING) << "tensor name: " << this->tensor_name_;
  }
  tensors_.reserve(tensors_num);
  int tensor_index = kOffset + data[1] + 1;
  for (int i = 0; i < tensors_num; i++) {
    MS_CHECK_LT(static_cast<size_t>(tensor_index), length, RET_ERROR);
    int tensor_dims_size = data[tensor_index++];
    std::vector<int> shape(tensor_dims_size);
    for (int j = 0; j < tensor_dims_size; j++) {
      MS_CHECK_LT(static_cast<size_t>(tensor_index), length, RET_ERROR);
      shape[j] = data[tensor_index++];
    }
    auto tensor = new (std::nothrow) Tensor(static_cast<TypeId>(tensor_list_c_.tensors_data_type_), shape);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "new Tensor failed";
      return RET_NULL_PTR;
    }
    tensors_.emplace_back(tensor);
  }
  return RET_OK;
}

bool TensorList::IsConst() const {
  return this->tensor_c_.category_ == CONST_TENSOR || this->tensor_c_.category_ == CONST_SCALAR;
}

TensorList *TensorList::CopyTensorList(const TensorList &src, bool copy_data, const AllocatorPtr &allocator) {
  auto *result = new (std::nothrow) TensorList();
  if (result == nullptr) {
    MS_LOG(ERROR) << "New TensorList failed";
    return nullptr;
  }
  (void)memcpy(&result->tensor_c_, &src.tensor_c_, sizeof(TensorC));
  result->tensor_c_.data_ = nullptr;
  (void)memcpy(&result->tensor_list_c_, &src.tensor_list_c_, sizeof(TensorListC));
  result->tensor_list_c_.tensors_ = nullptr;
  result->set_allocator(allocator);
  result->set_tensor_name(src.tensor_name() + "_duplicate");
  std::vector<std::vector<int> > tensor_shape{};
  (void)std::transform(src.tensors_.begin(), src.tensors_.end(), std::back_inserter(tensor_shape),
                       [](const Tensor *tensor_item) { return tensor_item->shape(); });

  for (LiteQuantParam quant : src.quant_params()) {
    result->AddQuantParam(quant);
  }

  if (result->shape().empty()) {
    return result;
  }
  (void)result->MallocTensorListData(static_cast<TypeId>(src.tensor_list_c_.tensors_data_type_), tensor_shape);
  if (copy_data) {
    for (size_t i = 0; i < src.tensors_.size(); ++i) {
      auto ret = Tensor::CopyTensorData(*(src.tensors_[i]), result->tensors_[i]);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "CopyTensorData error";
        delete result;
        return nullptr;
      }
    }
    result->own_data_ = src.own_data_;
  }

  return result;
}
#endif
}  // namespace mindspore::lite