/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "include/api/types.h"
#include <cstring>
#include <limits>
#include <numeric>
#include "include/api/status.h"
#include "include/api/dual_abi_helper.h"
#include "src/litert/cxx_api/tensor/tensor_impl.h"
#include "src/litert/cxx_api/tensor_utils.h"
#include "src/common/log_adapter.h"
#ifdef ENABLE_CLOUD_INFERENCE
#include <fstream>
#include "utils/file_utils.h"
#include "ir/dtype.h"
#include "utils/convert_utils_base.h"
#include "extendrt/kernel/ascend/plugin/ascend_allocator_plugin.h"
#endif

namespace mindspore {
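// Pimpl behind Buffer: owns a plain byte vector. SetData() resizes the vector
// and deep-copies `data_len` bytes from `data`; ResizeData() only reallocates.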
class Buffer::Impl {
 public:
  Impl() : data_() {}
  ~Impl() = default;
  Impl(const void *data, size_t data_len) {
    if (data != nullptr) {
      (void)SetData(data, data_len);
    } else {
      ResizeData(data_len);
    }
  }

  const void *Data() const { return data_.data(); }
  void *MutableData() { return data_.data(); }
  size_t DataSize() const { return data_.size(); }

  void ResizeData(size_t data_len) { data_.resize(data_len); }

  bool SetData(const void *data, size_t data_len) {
    ResizeData(data_len);
    if (DataSize() != data_len) {
      MS_LOG(ERROR) << "Set data failed, tensor current data size " << DataSize() << " not match data len "
                    << data_len;
      return false;
    }

    if (data == nullptr) {
      return data_len == 0;
    }

    if (MutableData() == nullptr) {
      MS_LOG(ERROR) << "Set data failed, data len " << data_len;
      return false;
    }

    (void)memcpy(MutableData(), data, data_len);
    return true;
  }

 protected:
  std::vector<uint8_t> data_;
};

MSTensor::MSTensor() {
  auto impl = std::make_shared<LiteTensorImpl>(new (std::nothrow) lite::Tensor());
  if (impl != nullptr) {
    impl->set_from_session(false);
  } else {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
  }
  impl_ = impl;
}
MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {}
MSTensor::MSTensor(const std::shared_ptr<Impl> &impl) : impl_(impl) {}
MSTensor::MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
                   const void *data, size_t data_len)
    : impl_(LiteTensorImpl::CreateTensorImplByDeepCopy(CharToString(name), type, shape, data, data_len)) {}
MSTensor::~MSTensor() = default;

bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; }

bool MSTensor::operator!=(std::nullptr_t) const { return impl_ != nullptr; }

bool MSTensor::operator==(const MSTensor &tensor) const {
  if (impl_ == nullptr) {
    return false;
  }
  auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(impl_);
  auto lite_tensor_impl = std::static_pointer_cast<LiteTensorImpl>(tensor.impl_);
  if (lite_tensor_impl == nullptr) {
    MS_LOG(ERROR) << "Cast lite tensor impl ptr failed.";
    return false;
  }

  return lite_impl->lite_tensor() == lite_tensor_impl->lite_tensor();
}

bool MSTensor::operator!=(const MSTensor &tensor) const { return !operator==(tensor); }

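// Creates a tensor owning a deep copy of `data`. When `device` is "ascend"
// (cloud inference builds only), device memory is allocated on `device_id`
// and the host buffer, if provided, is copied onto the device.
//
// Usage sketch, assuming the std::string convenience overload declared in
// include/api/types.h (names and values are illustrative):
//   float buf[4] = {0.f, 1.f, 2.f, 3.f};
//   auto *t = MSTensor::CreateTensor("in0", DataType::kNumberTypeFloat32, {2, 2}, buf, sizeof(buf));
//   // ... use t ...
//   MSTensor::DestroyTensorPtr(t);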
MSTensor *MSTensor::CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
                                 const void *data, size_t data_len, const std::vector<char> &device,
                                 int device_id) noexcept {
  MS_LOG(INFO) << "device id: " << device_id << ", device type: " << device;
  auto device_type = CharToString(device);
  if (!device_type.empty() && device_type == "ascend") {
#ifdef ENABLE_CLOUD_INFERENCE
    kernel::AscendAllocatorPlugin::GetInstance().Register();
    // check device id
    device_id = device_id == -1 ? kernel::AscendAllocatorPlugin::GetInstance().GetCurrentDeviceId() : device_id;
    // check device data size
    size_t element_size = CalTensorDataSize(shape, type);
    MS_CHECK_FALSE_MSG(data_len != 0 && element_size != data_len, nullptr, "data len does not match element size.");
    // malloc device data
    void *device_data = kernel::AscendAllocatorPlugin::GetInstance().Malloc(element_size, device_id);
    MS_CHECK_TRUE_MSG(device_data != nullptr, nullptr, "malloc device data failed.");
    // create tensor
    auto impl = LiteTensorImpl::CreateTensorImpl(CharToString(name), type, shape, nullptr, 0);
    if (impl == nullptr) {
      kernel::AscendAllocatorPlugin::GetInstance().Free(device_data, device_id);
      MS_LOG(ERROR) << "Allocate tensor impl failed.";
      return nullptr;
    }
    if (data != nullptr) {
      // init device data by host data buf
      auto status = kernel::AscendAllocatorPlugin::GetInstance().CopyHostDataToDevice(const_cast<void *>(data),
                                                                                      device_data, element_size);
      if (status != kSuccess) {
        kernel::AscendAllocatorPlugin::GetInstance().Free(device_data, device_id);
        MS_LOG(ERROR) << "copy host data to device data failed.";
        return nullptr;
      }
    }

    // init impl
    impl->SetDeviceData(device_data);
    impl->SetDeviceId(device_id);
    impl->SetDevice(device_type);

    auto ms_tensor = new (std::nothrow) MSTensor(impl);
    if (ms_tensor == nullptr) {
      kernel::AscendAllocatorPlugin::GetInstance().Free(device_data, device_id);
      MS_LOG(ERROR) << "Allocate MSTensor failed.";
      return nullptr;
    }
    impl->set_own_data(true);
    return ms_tensor;
#endif
    MS_LOG(ERROR) << "Unsupported Feature.";
    return nullptr;
  }
  if (data_len > MAX_MALLOC_SIZE) {
    MS_LOG(ERROR) << "data_len exceeds MAX_MALLOC_SIZE.";
    return nullptr;
  }
  if (data_len > 0 && data == nullptr) {
    MS_LOG(ERROR) << "Null data ptr of tensor.";
    return nullptr;
  }
  if (data_len == 0 && data != nullptr) {
    MS_LOG(ERROR) << "Data len doesn't match the data buffer size.";
    return nullptr;
  }

  void *new_data = nullptr;
  if (data != nullptr) {
    new_data = malloc(data_len);
    if (new_data == nullptr) {
      MS_LOG(ERROR) << "Allocate data failed.";
      return nullptr;
    }
    (void)memcpy(new_data, data, data_len);
  }
  auto impl = LiteTensorImpl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Allocate tensor impl failed.";
    free(new_data);
    return nullptr;
  }

  auto ms_tensor = new (std::nothrow) MSTensor(impl);
  if (ms_tensor == nullptr) {
    MS_LOG(ERROR) << "Allocate MSTensor failed.";
    free(new_data);
    return nullptr;
  }
  impl->set_own_data(true);
  return ms_tensor;
}

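// Copies `tensor` into a new tensor, dispatching on the source and destination
// device types: host->host and host->ascend reuse the raw-data overload above,
// ascend->ascend copies device memory directly, and ascend->host stages the
// device data through a temporary host buffer.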
MSTensor *MSTensor::CreateTensor(const std::vector<char> &name, const MSTensor &tensor, const std::vector<char> &device,
                                 int device_id) noexcept {
#ifdef ENABLE_CLOUD_INFERENCE
  kernel::AscendAllocatorPlugin::GetInstance().Register();
  auto dst_device_type = CharToString(device);
  if (!dst_device_type.empty() && dst_device_type != "ascend") {
    MS_LOG(ERROR) << "only support creating ascend device tensor.";
    return nullptr;
  }

  auto src_device_type = tensor.GetDevice();
  if (!src_device_type.empty() && src_device_type != "ascend") {
    MS_LOG(ERROR) << "only support src tensor of ascend device type.";
    return nullptr;
  }
  if (src_device_type.empty() && static_cast<MSTensor>(tensor).GetDeviceData() != nullptr) {
    MS_LOG(ERROR) << "src tensor has device data, but its device type is empty.";
    return nullptr;
  }

  if (src_device_type.empty() && dst_device_type.empty()) {
    MS_LOG(INFO) << "copy host tensor to host tensor.";
    if (tensor.Data() != nullptr) {
      return CreateTensor(tensor.Name(), tensor.DataType(), tensor.Shape(), static_cast<MSTensor>(tensor).MutableData(),
                          tensor.DataSize());
    } else {
      return CreateTensor(tensor.Name(), tensor.DataType(), tensor.Shape(), nullptr, 0);
    }
  } else if (src_device_type == "ascend" && dst_device_type == "ascend") {
    MS_LOG(INFO) << "copy device tensor to device tensor.";
    auto new_tensor =
      CreateTensor(tensor.Name(), tensor.DataType(), tensor.Shape(), nullptr, tensor.DataSize(), "ascend", device_id);
    MS_CHECK_FALSE_MSG(new_tensor == nullptr, nullptr, "create new device tensor failed.");
    auto status = kernel::AscendAllocatorPlugin::GetInstance().CopyDeviceDataToDevice(
      static_cast<MSTensor>(tensor).GetDeviceData(), new_tensor->GetDeviceData(), new_tensor->DataSize(),
      tensor.DataSize(), tensor.GetDeviceId(), new_tensor->GetDeviceId());
    if (status != kSuccess) {
      DestroyTensorPtr(new_tensor);
      return nullptr;
    }
    return new_tensor;
  } else if (src_device_type.empty() && dst_device_type == "ascend") {
    MS_LOG(INFO) << "copy host tensor to device tensor.";
    return CreateTensor(tensor.Name(), tensor.DataType(), tensor.Shape(), static_cast<MSTensor>(tensor).MutableData(),
                        tensor.DataSize(), dst_device_type, device_id);
  } else if (src_device_type == "ascend" && dst_device_type.empty()) {
    MS_LOG(INFO) << "copy device tensor to host tensor.";
    auto host_from_device = malloc(tensor.DataSize());
    MS_CHECK_FALSE_MSG(host_from_device == nullptr, nullptr, "malloc host buf failed.");
    auto status = kernel::AscendAllocatorPlugin::GetInstance().CopyDeviceDataToHost(
      static_cast<MSTensor>(tensor).GetDeviceData(), host_from_device, tensor.DataSize(), tensor.GetDeviceId());
    if (status != kSuccess) {
      free(host_from_device);
      return nullptr;
    }
    auto new_tensor =
      CreateTensor(tensor.Name(), tensor.DataType(), tensor.Shape(), host_from_device, tensor.DataSize());
    free(host_from_device);
    host_from_device = nullptr;
    return new_tensor;
  } else {
    MS_LOG(ERROR) << "unsupported device type combination.";
    return nullptr;
  }
#endif
  MS_LOG(ERROR) << "Unsupported Feature.";
  return nullptr;
}

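// Creates a tensor that references `data` in place instead of copying it. With
// own_data set, the tensor frees the buffer on destruction; otherwise the
// caller retains ownership and the buffer must outlive the tensor.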
MSTensor *MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type,
                                    const std::vector<int64_t> &shape, const void *data, size_t data_len,
                                    bool own_data) noexcept {
  auto impl = LiteTensorImpl::CreateTensorImpl(CharToString(name), type, shape, data, data_len);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Allocate tensor impl failed.";
    return nullptr;
  }
  impl->set_own_data(own_data);
  auto ms_tensor = new (std::nothrow) MSTensor(impl);
  if (ms_tensor == nullptr) {
    MS_LOG(ERROR) << "Allocate MSTensor failed.";
    return nullptr;
  }
  return ms_tensor;
}

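// Wraps an existing device buffer as a tensor without copying. `data_len` must
// cover at least the memory implied by `shape` and `type`.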
MSTensor MSTensor::CreateDeviceTensor(const std::vector<char> &name, enum DataType type,
                                      const std::vector<int64_t> &shape, void *data, size_t data_len) noexcept {
#ifdef ENABLE_CLOUD_INFERENCE
  auto impl = LiteTensorImpl::CreateTensorImpl(CharToString(name), type, shape, nullptr, 0);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Allocate tensor impl failed.";
    return MSTensor(nullptr);
  }
  if (data_len < impl->DataSize()) {
    MS_LOG(ERROR) << "The size " << data_len << " of data cannot be less than the memory size required by the shape "
                  << shape << " and data type " << TypeIdToString(static_cast<enum TypeId>(type));
    return MSTensor(nullptr);
  }
  impl->SetDeviceData(data);
  return MSTensor(impl);
#else
  MS_LOG(ERROR) << "Unsupported Feature.";
  return MSTensor(nullptr);
#endif
}

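// Reads a binary file into a newly allocated host tensor. With an empty
// `shape`, the result is a 1-D tensor whose length is the file size in bytes;
// otherwise the element count implied by `shape` and `type` must match the
// file size exactly.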
MSTensor *MSTensor::CreateTensorFromFile(const std::vector<char> &file, enum DataType type,
                                         const std::vector<int64_t> &shape) noexcept {
#ifdef ENABLE_CLOUD_INFERENCE
  try {
    std::string file_str = CharToString(file);

    auto realpath = mindspore::FileUtils::GetRealPath(file_str.c_str());
    if (!realpath.has_value()) {
      MS_LOG(ERROR) << "Get real path failed, path=" << file_str;
      return nullptr;
    }

    // Read the input file
    auto file_path = realpath.value();
    if (file_path.empty()) {
      MS_LOG(ERROR) << "Can not find any input file.";
      return nullptr;
    }

    std::ifstream ifs(file_path, std::ios::in | std::ios::binary);
    if (!ifs.good()) {
      MS_LOG(ERROR) << "File: " + file_path + " does not exist.";
      return nullptr;
    }
    if (!ifs.is_open()) {
      MS_LOG(ERROR) << "File: " + file_path + " open failed.";
      return nullptr;
    }

    auto &io_seekg1 = ifs.seekg(0, std::ios::end);
    if (!io_seekg1.good() || io_seekg1.fail() || io_seekg1.bad()) {
      ifs.close();
      MS_LOG(ERROR) << "Failed to seekg file: " + file_path;
      return nullptr;
    }

    size_t size = static_cast<size_t>(ifs.tellg());
    std::vector<int64_t> tensor_shape;
    tensor_shape = shape.empty() ? std::vector<int64_t>{static_cast<int64_t>(size)} : shape;
    MSTensor *ret = new (std::nothrow) MSTensor(file_path, type, tensor_shape, nullptr, size);
    if (ret == nullptr) {
      ifs.close();
      MS_LOG(ERROR) << "Malloc memory failed.";
      return nullptr;
    }
    auto &io_seekg2 = ifs.seekg(0, std::ios::beg);
    if (!io_seekg2.good() || io_seekg2.fail() || io_seekg2.bad()) {
      ifs.close();
      DestroyTensorPtr(ret);
      MS_LOG(ERROR) << "Failed to seekg file: " + file_path;
      return nullptr;
    }

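    // Byte width of each supported element type; unknown and string types map to 0.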
    std::map<enum DataType, size_t> TypeByte = {
      {DataType::kTypeUnknown, 0},       {DataType::kObjectTypeString, 0},  {DataType::kNumberTypeBool, 1},
      {DataType::kNumberTypeInt8, 1},    {DataType::kNumberTypeInt16, 2},   {DataType::kNumberTypeInt32, 4},
      {DataType::kNumberTypeInt64, 8},   {DataType::kNumberTypeUInt8, 1},   {DataType::kNumberTypeUInt16, 2},
      {DataType::kNumberTypeUInt32, 4},  {DataType::kNumberTypeUInt64, 8},  {DataType::kNumberTypeFloat16, 2},
      {DataType::kNumberTypeFloat32, 4}, {DataType::kNumberTypeFloat64, 8},
    };

    if (LongToSize(ret->ElementNum()) * TypeByte[type] != size) {
      ifs.close();
      DestroyTensorPtr(ret);
      MS_LOG(ERROR) << "Tensor data size: " << LongToSize(ret->ElementNum()) * TypeByte[type]
                    << " not match input data length: " << size;
      return nullptr;
    }

    auto &io_read = ifs.read(reinterpret_cast<char *>(ret->MutableData()), static_cast<std::streamsize>(size));
    if (!io_read.good() || io_read.fail() || io_read.bad()) {
      ifs.close();
      DestroyTensorPtr(ret);
      MS_LOG(ERROR) << "Failed to read file: " + file_path;
      return nullptr;
    }
    ifs.close();

    return ret;
  } catch (...) {
    MS_LOG(ERROR) << "Unknown error occurred.";
    return nullptr;
  }
#else
  MS_LOG(ERROR) << "Unsupported Feature.";
  return nullptr;
#endif
}
MSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &inputs) {
#ifndef STRING_KERNEL_CLIP
  auto impl = LiteTensorImpl::StringsToTensorImpl(CharToString(name), VectorCharToString(inputs));
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Allocate tensor impl failed.";
    return nullptr;
  }
  auto ms_tensor = new (std::nothrow) MSTensor(impl);
  if (ms_tensor == nullptr) {
    MS_LOG(ERROR) << "Allocate MSTensor failed.";
    return nullptr;
  }
  return ms_tensor;
#else
  MS_LOG(ERROR) << unsupport_string_tensor_log;
  return nullptr;
#endif
}

std::vector<std::vector<char>> MSTensor::TensorToStringChars(const MSTensor &tensor) {
#ifndef STRING_KERNEL_CLIP
  if (tensor.impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor.";
    return {{}};
  }
  auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(tensor.impl_);
  return VectorStringToChar(LiteTensorImpl::TensorImplToStrings(lite_impl));
#else
  std::vector<std::vector<char>> empty;
  MS_LOG(ERROR) << unsupport_string_tensor_log;
  return empty;
#endif
}

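// Deep-copies this tensor: allocates a fresh host buffer, copies the data, and
// returns a new MSTensor that owns the copy. Returns nullptr when DataSize()
// and the data pointer are inconsistent.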
MSTensor *MSTensor::Clone() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor.";
    return nullptr;
  }
  auto data_len = this->DataSize();
  if (data_len > MAX_MALLOC_SIZE) {
    MS_LOG(ERROR) << "Illegal data size of tensor.";
    return nullptr;
  }
  if (data_len > 0 && impl_->Data() == nullptr) {
    MS_LOG(ERROR) << "Null data ptr of tensor.";
    return nullptr;
  }
  if (data_len == 0 && impl_->Data() != nullptr) {
    MS_LOG(ERROR) << "Data len doesn't match the data buffer size.";
    return nullptr;
  }

  void *new_data = nullptr;
  if (impl_->Data() != nullptr) {
    new_data = malloc(data_len);
    if (new_data == nullptr) {
      MS_LOG(ERROR) << "Allocate data failed.";
      return nullptr;
    }
    (void)memcpy(new_data, impl_->MutableData(), data_len);
  }

  auto impl = LiteTensorImpl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Allocate tensor impl failed.";
    if (new_data != nullptr) {
      free(new_data);
    }
    return nullptr;
  }

  auto ms_tensor = new (std::nothrow) MSTensor(impl);
  if (ms_tensor == nullptr) {
    MS_LOG(ERROR) << "Allocate MSTensor failed.";
    if (new_data != nullptr) {
      free(new_data);
    }
    return nullptr;
  }
  impl->set_own_data(true);
  return ms_tensor;
}
std::vector<char> MSTensor::CharName() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return std::vector<char>();
  }
  return StringToChar(impl_->Name());
}

int64_t MSTensor::ElementNum() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return -1;
  }
  return std::static_pointer_cast<MutableTensorImpl>(impl_)->ElementNum();
}

enum DataType MSTensor::DataType() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return DataType::kTypeUnknown;
  }
  return impl_->DataType();
}

const std::vector<int64_t> &MSTensor::Shape() const {
  static const std::vector<int64_t> empty{};
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return empty;
  }
  return impl_->Shape();
}

std::shared_ptr<const void> MSTensor::Data() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return nullptr;
  }
  return impl_->Data();
}

void *MSTensor::MutableData() {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return nullptr;
  }
  return impl_->MutableData();
}

void MSTensor::SetDeviceData(void *data) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }
  std::static_pointer_cast<MutableTensorImpl>(impl_)->SetDeviceData(data);
}

void *MSTensor::GetDeviceData() {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return nullptr;
  }
  return std::static_pointer_cast<MutableTensorImpl>(impl_)->GetDeviceData();
}

bool MSTensor::IsConst() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return false;
  }
  return std::static_pointer_cast<MutableTensorImpl>(impl_)->IsConst();
}

size_t MSTensor::DataSize() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return 0;
  }
  return impl_->DataSize();
}

std::string MSTensor::GetDevice() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return "";
  }
  return std::static_pointer_cast<MutableTensorImpl>(impl_)->GetDevice();
}

int MSTensor::GetDeviceId() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return -1;
  }
  return std::static_pointer_cast<MutableTensorImpl>(impl_)->GetDeviceId();
}

bool MSTensor::IsDevice() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return false;
  }
  return impl_->IsDevice();
}

void MSTensor::DestroyTensorPtr(MSTensor *tensor) noexcept {
  if (tensor != nullptr) {
    delete tensor;
  }
}

void MSTensor::SetShape(const std::vector<int64_t> &shape) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  std::static_pointer_cast<MutableTensorImpl>(impl_)->SetShape(shape);
}

void MSTensor::SetDataType(enum DataType data_type) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  std::static_pointer_cast<MutableTensorImpl>(impl_)->SetDataType(data_type);
}

void MSTensor::SetTensorName(const std::vector<char> &name) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }
  std::static_pointer_cast<MutableTensorImpl>(impl_)->SetName(CharToString(name));
}

void MSTensor::SetAllocator(std::shared_ptr<Allocator> allocator) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->SetAllocator(allocator);
}

std::shared_ptr<Allocator> MSTensor::allocator() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return nullptr;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->GetAllocator();
}

void MSTensor::SetFormat(mindspore::Format format) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->SetFormat(format);
}

mindspore::Format MSTensor::format() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return mindspore::Format::NHWC;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->Format();
}

void MSTensor::SetData(void *data, bool own_data) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->SetData(data, own_data);
}

std::vector<QuantParam> MSTensor::QuantParams() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return std::vector<QuantParam>{};
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->GetQuantParams();
}

void MSTensor::SetQuantParams(std::vector<QuantParam> quant_params) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor implement.";
    return;
  }

  return std::static_pointer_cast<MutableTensorImpl>(impl_)->SetQuantParams(quant_params);
}

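// Buffer forwards to its pimpl and degrades gracefully (nullptr/0/false) when
// impl_ is missing. Clone() copy-constructs the Impl, which deep-copies the
// underlying byte vector.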
Buffer::Buffer() : impl_(std::make_shared<Impl>()) {}
Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared<Impl>(data, data_len)) {}
Buffer::~Buffer() = default;

Buffer Buffer::Clone() const {
  Buffer ret;
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return ret;
  }
  ret.impl_ = std::make_shared<Impl>(*impl_);
  return ret;
}

const void *Buffer::Data() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return nullptr;
  }
  return impl_->Data();
}

void *Buffer::MutableData() {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return nullptr;
  }
  return impl_->MutableData();
}

size_t Buffer::DataSize() const {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return 0;
  }
  return impl_->DataSize();
}

bool Buffer::ResizeData(size_t data_len) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return false;
  }
  impl_->ResizeData(data_len);
  return true;
}

bool Buffer::SetData(const void *data, size_t data_len) {
  if (impl_ == nullptr) {
    MS_LOG(ERROR) << "impl is nullptr.";
    return false;
  }
  return impl_->SetData(data, data_len);
}

std::vector<char> CharVersion() {
  std::string version = VERSION_STR;
  return StringToChar("MindSpore Lite " + version);
}
}  // namespace mindspore