/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tensor.h"
#include "tools/common/tensor_util.h"
#include "src/common/utils.h"
#include "tools/common/graph_util.h"
#include "abstract/utils.h"
#include "nnacl/op_base.h"

namespace mindspore::lite {
namespace {
constexpr float kInputDataFloatMin = 0.1f;
constexpr float kInputDataFloatMax = 1.0f;
constexpr double kInputDataDoubleMin = 0.1;
constexpr double kInputDataDoubleMax = 1.0;
constexpr int64_t kInputDataInt64Min = 0;
constexpr int64_t kInputDataInt64Max = 1;
constexpr int32_t kInputDataInt32Min = 0;
constexpr int32_t kInputDataInt32Max = 1;
constexpr int16_t kInputDataInt16Min = 0;
constexpr int16_t kInputDataInt16Max = 1;
constexpr int16_t kInputDataInt8Min = -127;
constexpr int16_t kInputDataInt8Max = 127;
constexpr int16_t kInputDataUint8Min = 0;
constexpr int16_t kInputDataUint8Max = 254;
}  // namespace
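// Returns a copy of the first quantization parameter attached to the tensor, or nullptr if none exist.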
std::unique_ptr<QuantParamT> GetTensorQuantParam(const std::unique_ptr<TensorT> &tensor) {
  MS_ASSERT(tensor != nullptr);
  auto &quantParams = tensor->quantParams;
  if (!quantParams.empty()) {
    return CopyQuantParamT(quantParams.front());
  } else {
    return nullptr;
  }
}
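// Deep-copies a schema::QuantParamT so the copy can be modified without affecting the source.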
std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schema::QuantParamT> &srcQuantParam) {
  MS_ASSERT(srcQuantParam != nullptr);
  std::unique_ptr<schema::QuantParamT> dstQuantParam = std::make_unique<schema::QuantParamT>();
  dstQuantParam->inited = srcQuantParam->inited;
  dstQuantParam->scale = srcQuantParam->scale;
  dstQuantParam->zeroPoint = srcQuantParam->zeroPoint;
  dstQuantParam->min = srcQuantParam->min;
  dstQuantParam->max = srcQuantParam->max;
  dstQuantParam->narrowRange = srcQuantParam->narrowRange;
  dstQuantParam->numBits = srcQuantParam->numBits;
  dstQuantParam->dstDtype = srcQuantParam->dstDtype;
  dstQuantParam->multiplier = srcQuantParam->multiplier;
  return dstQuantParam;
}

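// Creates a tensor::Tensor with the given shape and data type. When data is non-null, data_size bytes are
// copied into the tensor; an empty shape with data_size equal to the element size yields a scalar tensor.
// Returns nullptr on failure.
// Minimal usage sketch (hypothetical values):
//   std::vector<float> buf = {1.0f, 2.0f, 3.0f, 4.0f};
//   auto info = CreateTensorInfo(buf.data(), buf.size() * sizeof(float), {2, 2}, kNumberTypeFloat32);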
tensor::TensorPtr CreateTensorInfo(const void *data, size_t data_size, const std::vector<int64_t> &shape,
                                   TypeId data_type) {
  if (data_type == kTypeUnknown) {
    MS_LOG(ERROR) << "data type of tensor is unknown";
    return nullptr;
  }
  int size = 1;
  for (auto dim : shape) {
    MS_CHECK_INT_MUL_NOT_OVERFLOW(size, dim, nullptr);
    size *= dim;
  }
  tensor::TensorPtr tensor_info = nullptr;
  if (shape.empty() && data_size == mindspore::abstract::TypeIdSize(data_type)) {
    ShapeVector scalar_shape = {1};
    tensor_info = std::make_shared<tensor::Tensor>(data_type, scalar_shape);
    if (tensor_info == nullptr) {
      MS_LOG(ERROR) << "new tensor init failed";
      return nullptr;
    }
    tensor_info->set_shape({});
  } else {
    tensor_info = std::make_shared<tensor::Tensor>(data_type, shape);
  }
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "new tensor init failed";
    return nullptr;
  }
  if (data_size == 0) {
    return tensor_info;
  }
  if (data == nullptr) {
    MS_LOG(ERROR) << "input tensor data is nullptr";
    return nullptr;
  }
  MS_CHECK_TRUE_MSG(tensor_info->Size() == data_size, nullptr, "invalid const tensor");
  auto ret = memcpy_s(tensor_info->data_c(), tensor_info->data().nbytes(), data, data_size);
  if (ret != EOK) {
    MS_LOG(ERROR) << "memcpy_s error : " << ret;
    return nullptr;
  }
  return tensor_info;
}

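// Builds an abstract (shape and dtype description) for a tensor without allocating any tensor data.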
AbstractBasePtr CreateTensorAbstract(const std::vector<int64_t> &shape, TypeId data_type) {
  auto tensor_info = CreateTensorInfo(nullptr, 0, shape, data_type);
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "Create tensor info failed";
    return nullptr;
  }
  auto abstract = tensor_info->ToAbstract();
  if (abstract == nullptr) {
    MS_LOG(ERROR) << "Create tensor abstract failed";
    return nullptr;
  }
  return abstract;
}

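// Creates a tensor from the given data/shape/dtype and attaches the resulting abstract to the parameter node.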
int SetParameterAbstractAndParam(const ParameterPtr &parameter, const void *data, size_t data_size,
                                 const std::vector<int64_t> &shape, TypeId data_type) {
  if (parameter == nullptr) {
    MS_LOG(ERROR) << "Input parameter is nullptr";
    return RET_INPUT_PARAM_INVALID;
  }
  auto tensor_info = CreateTensorInfo(data, data_size, shape, data_type);
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "Create tensor info failed";
    return RET_ERROR;
  }
  auto abstract = tensor_info->ToAbstract();
  if (abstract == nullptr) {
    MS_LOG(ERROR) << "Create tensor abstract failed";
    return RET_ERROR;
  }
  parameter->set_abstract(abstract);
  return RET_OK;
}

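// Copies data_size bytes into an existing tensor; the size must match the tensor's byte size exactly.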
int SetTensorData(const tensor::TensorPtr &tensor_info, const void *data, size_t data_size) {
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "tensor info is nullptr.";
    return RET_ERROR;
  }
  if (data == nullptr) {
    MS_LOG(ERROR) << "data is nullptr.";
    return RET_ERROR;
  }
  MS_CHECK_TRUE_MSG(tensor_info->Size() == data_size, RET_ERROR, "invalid const tensor");
  auto ret = memcpy_s(tensor_info->data_c(), tensor_info->data().nbytes(), data, data_size);
  if (ret != EOK) {
    MS_LOG(ERROR) << "memcpy_s error : " << ret;
    return RET_ERROR;
  }
  return RET_OK;
}

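// Converts a tensor::Tensor into a newly allocated schema::TensorT with the given name.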
std::unique_ptr<schema::TensorT> CreateTensorTFromTensorInfo(const tensor::TensorPtr &tensor_info,
                                                             const std::string &tensor_name) {
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "Input tensor is nullptr";
    return nullptr;
  }
  auto schema_tensor = std::make_unique<schema::TensorT>();
  MS_CHECK_TRUE_MSG(schema_tensor != nullptr, nullptr, "schema_tensor is nullptr");
  schema_tensor->name = tensor_name;
  auto ret = UpdateTensorTFromTensorInfo(tensor_info, &schema_tensor);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init schema tensor failed";
    return nullptr;
  }
  return schema_tensor;
}

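// Fills an existing schema::TensorT with the format, data type, dims, and data buffer of a tensor::Tensor.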
int UpdateTensorTFromTensorInfo(const tensor::TensorPtr &src_tensor, std::unique_ptr<schema::TensorT> *dst_tensor) {
  if (src_tensor == nullptr) {
    MS_LOG(ERROR) << "Input tensor info is nullptr";
    return RET_INPUT_PARAM_INVALID;
  }
  if (dst_tensor == nullptr || *dst_tensor == nullptr) {
    MS_LOG(ERROR) << "Input schema tensor is nullptr";
    return RET_INPUT_PARAM_INVALID;
  }
  auto &schema_tensor = *dst_tensor;
  schema_tensor->format = schema::Format_NHWC;
  schema_tensor->dataType = src_tensor->data_type();
  auto &shape_vector = src_tensor->shape();
  std::vector<int32_t> dims;
  (void)std::transform(shape_vector.begin(), shape_vector.end(), std::back_inserter(dims),
                       [](const int64_t &value) { return static_cast<int32_t>(value); });
  schema_tensor->dims = dims;
  if (src_tensor->data().data() != nullptr) {
    schema_tensor->data.resize(src_tensor->data().nbytes());
    if (EOK != memcpy_s(schema_tensor->data.data(), schema_tensor->data.size(), src_tensor->data().data(),
                        src_tensor->data().nbytes())) {
      MS_LOG(ERROR) << "memcpy_s failed.";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

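// Binds a tensor to a Parameter node as both its abstract and its default (constant) value.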
int InitParameterFromTensorInfo(const ParameterPtr &param_node, const tensor::TensorPtr &tensor_info) {
  if (tensor_info == nullptr) {
    MS_LOG(ERROR) << "tensor info is nullptr.";
    return RET_ERROR;
  }
  auto abstract_tensor = tensor_info->ToAbstract();
  if (abstract_tensor == nullptr) {
    MS_LOG(ERROR) << "Create abstract tensor failed.";
    return RET_ERROR;
  }
  param_node->set_abstract(abstract_tensor);
  param_node->set_default_param(tensor_info);
  return RET_OK;
}

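// Returns the size in bytes of a single element for the given data type; unsupported types fall back to
// sizeof(float).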
size_t GetElementSize(const TensorT &tensor) { return GetElementSize(TypeId(tensor.dataType)); }

size_t GetElementSize(const TypeId &dataType) {
  switch (dataType) {
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeFloat:
      return sizeof(float);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    default:
      return sizeof(float);
  }
}

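// Returns the number of elements described by the tensor's dims, or 0 if any dim is non-positive.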
size_t GetShapeSize(const TensorT &tensor) {
  auto shape = tensor.dims;
  size_t shapeSize = 1;
  for (auto dim : shape) {
    if (dim <= 0) {
      MS_LOG(WARNING) << "Dim value less than or equal to 0 found in tensor's shape.";
      return 0;
    }
    shapeSize *= static_cast<size_t>(dim);
  }
  return shapeSize;
}

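// Deep-copies a TensorT, including its data buffer and the first quantization parameter if present.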
std::unique_ptr<TensorT> CopyTensorDefT(const std::unique_ptr<TensorT> &oldTensor) {
  auto newTensor = std::unique_ptr<TensorT>(new (std::nothrow) TensorT);
  if (newTensor == nullptr) {
    MS_LOG(ERROR) << "new TensorT failed";
    return nullptr;
  }
  newTensor->dims = oldTensor->dims;
  newTensor->format = oldTensor->format;
  newTensor->dataType = oldTensor->dataType;
  newTensor->refCount = oldTensor->refCount;
  newTensor->nodeType = oldTensor->nodeType;
  newTensor->data = oldTensor->data;
  if (!oldTensor->quantParams.empty()) {
    newTensor->quantParams.emplace_back(GetTensorQuantParam(oldTensor));
  }
  return newTensor;
}

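// Counts how many nodes in the graph consume the tensor at tensorIdx as an input.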
size_t GetRefCount(MetaGraphT *graphT, uint32_t tensorIdx) {
  MS_CHECK_TRUE_MSG(graphT != nullptr, 0, "graphT is nullptr");
  MS_ASSERT(graphT->allTensors.size() > tensorIdx);
  size_t refCount = 0;
  for (auto &node : graphT->nodes) {
    MS_ASSERT(node != nullptr);
    if (IsContain(node->inputIndex, tensorIdx)) {
      refCount++;
    }
  }
  return refCount;
}

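// Returns the number of elements described by the shape vector, or 0 if any dim is non-positive.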
size_t GetShapeSize(const std::vector<int32_t> &shape) {
  size_t shapeSize = 1;
  for (auto dim : shape) {
    if (dim <= 0) {
      MS_LOG(WARNING) << "Dim value " << dim << " in tensor's shape is less than or equal to 0.";
      return 0;
    }
    shapeSize *= static_cast<size_t>(dim);
  }
  return shapeSize;
}

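// Fills the buffer with random values drawn from the per-type ranges defined above; unsupported data types
// fall back to a simple incremental byte pattern.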
int GenerateRandomData(size_t size, void *data, int data_type) {
  MS_ASSERT(data != nullptr);
  switch (data_type) {
    case kNumberTypeFloat32:
    case kNumberTypeFloat:
      FillInputData<float>(size, data, std::uniform_real_distribution<float>(kInputDataFloatMin, kInputDataFloatMax));
      break;
    case kNumberTypeFloat64:
      FillInputData<double>(size, data,
                            std::uniform_real_distribution<double>(kInputDataDoubleMin, kInputDataDoubleMax));
      break;
    case kNumberTypeInt64:
      FillInputData<int64_t>(size, data,
                             std::uniform_int_distribution<int64_t>(kInputDataInt64Min, kInputDataInt64Max));
      break;
    case kNumberTypeInt:
    case kNumberTypeInt32:
      FillInputData<int32_t>(size, data,
                             std::uniform_int_distribution<int32_t>(kInputDataInt32Min, kInputDataInt32Max));
      break;
    case kNumberTypeInt16:
      FillInputData<int16_t>(size, data,
                             std::uniform_int_distribution<int16_t>(kInputDataInt16Min, kInputDataInt16Max));
      break;
    case kNumberTypeInt8:
      FillInputData<int8_t>(size, data, std::uniform_int_distribution<int16_t>(kInputDataInt8Min, kInputDataInt8Max));
      break;
    case kNumberTypeUInt8:
      FillInputData<uint8_t>(size, data,
                             std::uniform_int_distribution<uint16_t>(kInputDataUint8Min, kInputDataUint8Max));
      break;
    default: {
      char *casted_data = static_cast<char *>(data);
      for (size_t i = 0; i < size; i++) {
        casted_data[i] = static_cast<char>(i);
      }
    }
  }
  return RET_OK;
}

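// Fills an MSTensor with random data; string tensors are populated with a fixed placeholder string instead.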
int GenerateRandomData(mindspore::MSTensor *tensor) {
  MS_CHECK_TRUE_MSG(tensor != nullptr, RET_NULL_PTR, "tensor is nullptr");
  auto input_data = tensor->MutableData();
  if (input_data == nullptr) {
    MS_LOG(ERROR) << "MallocData for inTensor failed";
    return RET_ERROR;
  }
  int status = RET_ERROR;
  if (static_cast<TypeId>(tensor->DataType()) == kObjectTypeString) {
    MSTensor *input = MSTensor::StringsToTensor(tensor->Name(), {"you're the best."});
    if (input == nullptr) {
      std::cerr << "StringsToTensor failed" << std::endl;
      MS_LOG(ERROR) << "StringsToTensor failed";
      return RET_ERROR;
    }
    *tensor = *input;
    delete input;
    status = RET_OK;
  } else {
    status = GenerateRandomData(tensor->DataSize(), input_data, static_cast<int>(tensor->DataType()));
  }
  if (status != RET_OK) {
    std::cerr << "GenerateRandomData for inTensor failed: " << status << std::endl;
    MS_LOG(ERROR) << "GenerateRandomData for inTensor failed:" << status;
    return status;
  }
  return RET_OK;
}
}  // namespace mindspore::lite