1 /**
2 * Copyright 2020 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "src/common/tensor_util.h"
18 #include <algorithm>
19 #include "schema/model_generated.h"
20 #include "include/errorcode.h"
21 #include "src/common/log_adapter.h"
22
23 namespace mindspore {
24 namespace lite {
OutputTensor2TensorC(const std::vector<lite::Tensor * > & tensors,std::vector<TensorC * > * tensors_c)25 int OutputTensor2TensorC(const std::vector<lite::Tensor *> &tensors, std::vector<TensorC *> *tensors_c) {
26 MS_ASSERT(tensors_c != nullptr);
27 for (size_t i = 0; i < tensors.size(); ++i) {
28 auto *tensor_c = static_cast<TensorC *>(malloc(sizeof(TensorC)));
29 if (tensor_c == nullptr) {
30 MS_LOG(ERROR) << "malloc tensor fail!";
31 return RET_ERROR;
32 }
33 tensor_c->data_type_ = kNumberTypeFloat32;
34 tensor_c->format_ = mindspore::NCHW;
35 tensor_c->data_ = nullptr;
36 tensor_c->shape_size_ = 0;
37 tensors_c->push_back(tensor_c);
38 }
39 return RET_OK;
40 }
41
FreeAllTensorC(std::vector<TensorC * > * tensors_in)42 void FreeAllTensorC(std::vector<TensorC *> *tensors_in) {
43 if (tensors_in == nullptr) {
44 return;
45 }
46 for (auto &i : *tensors_in) {
47 if (i == nullptr) {
48 continue;
49 }
50 #ifndef CONTROLFLOW_TENSORLIST_CLIP
51 if (i->data_type_ == kObjectTypeTensorType) {
52 TensorListC *tensorListC = reinterpret_cast<TensorListC *>(i);
53 FreeTensorListC(tensorListC);
54 tensorListC = nullptr;
55 } else {
56 #endif
57 free(i);
58 i = nullptr;
59 #ifndef CONTROLFLOW_TENSORLIST_CLIP
60 }
61 #endif
62 }
63 tensors_in->clear();
64 }
65
Tensor2TensorC(const Tensor * src,TensorC * dst)66 int Tensor2TensorC(const Tensor *src, TensorC *dst) {
67 MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
68 dst->is_ready_ = src->IsReady();
69 dst->format_ = src->format();
70 dst->data_ = src->data();
71 dst->data_type_ = src->data_type();
72 dst->shape_size_ = src->shape().size();
73 if (dst->shape_size_ > MAX_SHAPE_SIZE) {
74 MS_LOG(ERROR) << "tensor shape size " << dst->shape_size_ << " is larger than max shape size " << MAX_SHAPE_SIZE;
75 return RET_ERROR;
76 }
77 for (size_t i = 0; i < dst->shape_size_; i++) {
78 dst->shape_[i] = src->shape().at(i);
79 }
80 return RET_OK;
81 }
82
TensorC2Tensor(const TensorC * src,Tensor * dst)83 int TensorC2Tensor(const TensorC *src, Tensor *dst) {
84 MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
85 dst->set_format(static_cast<mindspore::Format>(src->format_));
86 dst->set_data_type(static_cast<TypeId>(src->data_type_)); // get data during the runtime period
87 dst->set_shape(std::vector<int>(src->shape_, src->shape_ + src->shape_size_));
88 return RET_OK;
89 }
90
91 #ifndef CONTROLFLOW_TENSORLIST_CLIP
FreeTensorListC(TensorListC * tensorlist_c)92 void FreeTensorListC(TensorListC *tensorlist_c) {
93 MS_ASSERT(tensorlist_c != nullptr);
94 if (tensorlist_c->tensors_ != nullptr) {
95 free(tensorlist_c->tensors_);
96 tensorlist_c->tensors_ = nullptr;
97 }
98 free(tensorlist_c);
99 }
100
TensorList2TensorListC(TensorList * src,TensorListC * dst)101 int TensorList2TensorListC(TensorList *src, TensorListC *dst) {
102 MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
103 dst->is_ready_ = src->IsReady();
104 dst->data_type_ = static_cast<TypeIdC>(src->data_type());
105 dst->format_ = src->format();
106 dst->shape_value_ = src->shape().empty() ? 0 : src->shape().front();
107 dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size();
108
109 if ((dst->element_num_ != 0 && SIZE_MAX / dst->element_num_ < sizeof(TensorC)) ||
110 dst->element_num_ * sizeof(TensorC) > MAX_MALLOC_SIZE) {
111 MS_LOG(ERROR) << "data size error.";
112 return RET_ERROR;
113 }
114 dst->tensors_ = reinterpret_cast<TensorC *>(malloc(dst->element_num_ * sizeof(TensorC)));
115 if (dst->tensors_ == nullptr) {
116 return RET_ERROR;
117 }
118 memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC));
119 for (size_t i = 0; i < dst->element_num_; i++) {
120 auto ret = Tensor2TensorC(src->tensors().at(i), &dst->tensors_[i]);
121 if (ret != RET_OK) {
122 MS_LOG(ERROR) << "Tensor to TensorC failed.";
123 return ret;
124 }
125 }
126
127 dst->tensors_data_type_ = src->tensors_data_type();
128 dst->element_shape_size_ = src->element_shape().size();
129 for (size_t i = 0; i < dst->element_shape_size_; i++) {
130 dst->element_shape_[i] = src->element_shape().at(i);
131 }
132 dst->max_elements_num_ = src->max_elements_num();
133 return NNACL_OK;
134 }
135
TensorListC2TensorList(const TensorListC * src,TensorList * dst)136 int TensorListC2TensorList(const TensorListC *src, TensorList *dst) {
137 MS_CHECK_TRUE_RET(src != nullptr && dst != nullptr, RET_ERROR);
138 dst->set_data_type(static_cast<TypeId>(src->data_type_));
139 dst->set_format(static_cast<mindspore::Format>(src->format_));
140 dst->set_shape(std::vector<int>(1, src->element_num_));
141 dst->set_tensors_data_type(static_cast<TypeId>(src->tensors_data_type_));
142
143 // Set Tensors
144 for (size_t i = 0; i < src->element_num_; i++) {
145 auto ret = TensorC2Tensor(&src->tensors_[i], dst->GetTensor(i));
146 if (ret != RET_OK) {
147 MS_LOG(ERROR) << "TensorC2Tensor failed";
148 return ret;
149 }
150 }
151
152 dst->set_element_shape(std::vector<int>(src->element_shape_, src->element_shape_ + src->element_shape_size_));
153 dst->set_max_elements_num(src->max_elements_num_);
154 return RET_OK;
155 }
156
GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor * > & inputs,int outputs_size,std::vector<TensorC * > * out_tensor_c)157 int GenerateMergeSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, int outputs_size,
158 std::vector<TensorC *> *out_tensor_c) {
159 MS_CHECK_TRUE_RET(out_tensor_c != nullptr, RET_ERROR);
160 int ret = RET_OK;
161 for (int i = 0; i < outputs_size; i++) {
162 out_tensor_c->push_back(nullptr);
163 }
164 return ret;
165 }
166 #endif
167
/**
 * Allocate placeholder output descriptors for an op. Ops that produce a tensor list
 * (TensorListFromTensor / TensorListReserve / TensorListSetItem) get a single zeroed
 * TensorListC; every other op gets one TensorC per output via OutputTensor2TensorC.
 * Returns RET_OK on success, RET_ERROR on allocation failure or when tensor-list
 * support is compiled out.
 */
int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
                       const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *out_tensor_c) {
  MS_CHECK_TRUE_RET(out_tensor_c != nullptr && parameter != nullptr, RET_ERROR);
  const bool produces_tensor_list = parameter->type_ == mindspore::schema::PrimitiveType_TensorListFromTensor ||
                                    parameter->type_ == mindspore::schema::PrimitiveType_TensorListReserve ||
                                    parameter->type_ == mindspore::schema::PrimitiveType_TensorListSetItem;
  if (!produces_tensor_list) {
    return OutputTensor2TensorC(outputs, out_tensor_c);
  }
#ifndef CONTROLFLOW_TENSORLIST_CLIP
  // TensorListC -> TensorC: the list descriptor is stored in the vector as a TensorC*.
  auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC)));
  if (tensor_list_c == nullptr) {
    return RET_ERROR;
  }
  memset(tensor_list_c, 0, sizeof(TensorListC));
  out_tensor_c->push_back(reinterpret_cast<TensorC *const>(tensor_list_c));
  return RET_OK;
#else
  MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
  return RET_ERROR;
#endif
}
191
/**
 * Build a TensorC descriptor for every input tensor. Plain tensors are converted with
 * Tensor2TensorC; tensor-list inputs are converted into a heap-allocated TensorListC
 * (stored in the output vector reinterpreted as TensorC*).
 *
 * On failure, descriptors already appended to `in_tensor_c` are left in place for the
 * caller to release (e.g. with FreeAllTensorC).
 *
 * @return RET_OK on success; RET_NULL_PTR on allocation failure; RET_NOT_SUPPORT when
 *         tensor-list support is compiled out; otherwise the conversion error code.
 */
int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs,
                      const std::vector<lite::Tensor *> &outputs, std::vector<TensorC *> *in_tensor_c) {
  MS_CHECK_TRUE_RET(in_tensor_c != nullptr, RET_ERROR);
  int ret = RET_OK;
  for (auto input : inputs) {
    if (input->data_type() == kObjectTypeTensorType) {
#ifndef CONTROLFLOW_TENSORLIST_CLIP
      // Tensor -> TensorList -> TensorListC -> TensorC
      auto *tensor_list = reinterpret_cast<TensorList *>(input);
      auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC)));
      if (tensor_list_c == nullptr) {
        ret = RET_NULL_PTR;
        break;
      }
      memset(tensor_list_c, 0, sizeof(TensorListC));
      ret = TensorList2TensorListC(tensor_list, tensor_list_c);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "TensorList to TensorListC failed.";
        // FreeTensorListC releases both tensors_ and the descriptor itself.
        FreeTensorListC(tensor_list_c);
        // Was `return NNACL_ERR`: propagate the lite RET_* code so callers see a
        // consistent error-code domain.
        return ret;
      }
      in_tensor_c->push_back(reinterpret_cast<TensorC *>(tensor_list_c));
#else
      MS_LOG(ERROR) << unsupport_controlflow_tensorlist_log;
      return RET_NOT_SUPPORT;
#endif
    } else {
      // Tensor -> TensorC
      auto *tensor_c = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
      if (tensor_c == nullptr) {
        ret = RET_NULL_PTR;
        break;
      }
      ret = Tensor2TensorC(input, tensor_c);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Tensor to TensorC failed.";
        free(tensor_c);
        return ret;
      }
      in_tensor_c->emplace_back(tensor_c);
    }
  }
  return ret;
}
236
/**
 * Validate graph input tensors before running: each must be non-null, carry data,
 * have a fully non-negative shape, and use NHWC or NCHW format.
 * Returns RET_OK when all tensors pass, RET_FORMAT_ERR for an unexpected format,
 * RET_ERROR otherwise.
 */
int CheckTensorsInvalid(const std::vector<Tensor *> &tensors) {
  for (auto &tensor : tensors) {
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "Graph input tensor is nullptr";
      return RET_ERROR;
    }
    // Tensor-list inputs (kObjectTypeTensorType) are exempt from this data check —
    // presumably they carry their payload in element tensors rather than a flat buffer.
    if (tensor->data_type() != kObjectTypeTensorType && tensor->data() == nullptr) {
      MS_LOG(ERROR) << "Graph input tensor data is nullptr " << tensor->tensor_name();
      return RET_ERROR;
    }
    // A negative dimension means the shape is still symbolic/unresolved.
    const auto &shape = tensor->shape();
    bool valid = all_of(shape.begin(), shape.end(), [](int i) { return i >= 0; });
    if (!valid) {
      MS_LOG(ERROR) << "The shape of tensor contains negative dimension,"
                    << "check the model and assign the input shape with method Resize().";
      return RET_ERROR;
    }
    if (tensor->format() != mindspore::NHWC && tensor->format() != mindspore::NCHW) {
      MS_LOG(ERROR) << "model input's format may be changed, which should be NHWC or NCHW";
      return RET_FORMAT_ERR;
    }
    // NOTE(review): this unconditional null-data check re-rejects the
    // kObjectTypeTensorType tensors that the earlier check deliberately exempted.
    // Confirm whether tensor-list inputs can legitimately reach here with null data;
    // if so, this check should carry the same exemption.
    if (tensor->data() == nullptr) {
      MS_LOG(ERROR) << "tensor data should be filled before run op";
      return RET_ERROR;
    }
  }
  return RET_OK;
}
265
LiteTensorsToMSTensors(const std::vector<lite::Tensor * > & lite_tensors)266 std::vector<mindspore::MSTensor> LiteTensorsToMSTensors(const std::vector<lite::Tensor *> &lite_tensors) {
267 std::vector<mindspore::MSTensor> tensors;
268 std::transform(lite_tensors.begin(), lite_tensors.end(), std::back_inserter(tensors), [](lite::Tensor *tensor) {
269 return mindspore::MSTensor(std::make_shared<mindspore::MSTensor::Impl>(tensor));
270 });
271
272 return tensors;
273 }
274 } // namespace lite
275 } // namespace mindspore
276