1 /**
2 * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
3 *
4 * Copyright 2019-2021 Huawei Technologies Co., Ltd
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include <memory>
20 #include <algorithm>
21 #include <utility>
22
23 #include "extendrt/utils/tensor_utils.h"
24 #include "mindspore/ccsrc/kernel/common_utils.h"
25 #include "mindspore/ccsrc/kernel/framework_utils.h"
26 #include "mindspore/ccsrc/kernel/format_utils.h"
27
28 namespace mindspore {
// Wraps an externally-owned buffer as tensor data without copying it.
// Ownership is only taken if a deleter is supplied (invoked in the dtor).
// NOTE(review): despite the parameter name, callers in this file pass the
// element count as `bytes_size` (see MSTensorToTensorPtr passing
// ElementNum()); it is stored in elem_count_ and itemsize() divides
// data_size_ by it — confirm against the header before renaming.
TensorRefData::TensorRefData(void *data, size_t bytes_size, size_t data_size, size_t ndim,
                             const std::function<void(uint8_t *)> &deleter)
    : data_(data), elem_count_(bytes_size), data_size_(data_size), ndim_(ndim), deleter_(deleter) {}
32
~TensorRefData()33 TensorRefData::~TensorRefData() {
34 if (deleter_ && data_) {
35 deleter_(reinterpret_cast<uint8_t *>(data_));
36 }
37 }
38
size() const39 ssize_t TensorRefData::size() const { return static_cast<ssize_t>(elem_count_); }
40
itemsize() const41 ssize_t TensorRefData::itemsize() const {
42 if (elem_count_ == 0) {
43 return 0;
44 }
45 return static_cast<ssize_t>(data_size_ / elem_count_);
46 }
47
nbytes() const48 ssize_t TensorRefData::nbytes() const { return static_cast<ssize_t>(data_size_); }
49
ndim() const50 ssize_t TensorRefData::ndim() const { return static_cast<ssize_t>(ndim_); }
51
data()52 void *TensorRefData::data() { return data_; }
53
const_data() const54 const void *TensorRefData::const_data() const { return data_; }
55
ToString(TypeId type,const ShapeVector & shape,bool use_comma) const56 std::string TensorRefData::ToString(TypeId type, const ShapeVector &shape, bool use_comma) const {
57 std::stringstream stream;
58 stream << "RefTensor:[";
59 for (size_t i = 0; i < shape.size(); i++) {
60 stream << shape[i];
61 if (i + 1 < shape.size()) {
62 stream << ",";
63 }
64 }
65 stream << "]" << type;
66 return stream.str();
67 }
68
// Returns the tensor's layout format, converting the device_info format
// string into the mindspore::Format enum via the kernel helper.
mindspore::Format TensorTensorImpl::Format() const {
  MS_EXCEPTION_IF_NULL(tensor_);
  return kernel::GetFormatFromStrToEnum(tensor_->device_info().format_);
}
73
SetFormat(mindspore::Format format)74 void TensorTensorImpl::SetFormat(mindspore::Format format) {
75 MS_EXCEPTION_IF_NULL(tensor_);
76 auto device_info = tensor_->device_info();
77 device_info.format_ = kernel::GetFormatFromEnumToStr(format);
78 tensor_->set_device_info(device_info);
79 }
80
MSTensorToTensorPtr(const std::vector<MSTensor> & ms_tensors)81 std::vector<mindspore::tensor::TensorPtr> TensorUtils::MSTensorToTensorPtr(const std::vector<MSTensor> &ms_tensors) {
82 std::vector<mindspore::tensor::TensorPtr> tensor_ptrs;
83
84 for (auto ms_tensor : ms_tensors) {
85 auto data_type = ms_tensor.DataType();
86 auto type_id = static_cast<mindspore::TypeId>(data_type);
87 auto shape = ms_tensor.Shape();
88 auto data = ms_tensor.MutableData();
89 auto data_size = ms_tensor.DataSize();
90 auto ref_tensor_data = std::make_shared<TensorRefData>(data, ms_tensor.ElementNum(), data_size, shape.size());
91 auto tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(type_id, shape, ref_tensor_data);
92 tensor_ptr->set_name(ms_tensor.Name());
93 tensor_ptr->set_data_type(type_id);
94 tensor_ptrs.push_back(tensor_ptr);
95 }
96 return tensor_ptrs;
97 }
98
TensorPtrToMSTensor(std::vector<mindspore::tensor::TensorPtr> tensor_ptrs,const std::vector<std::string> & tensor_names)99 std::vector<MSTensor> TensorUtils::TensorPtrToMSTensor(std::vector<mindspore::tensor::TensorPtr> tensor_ptrs,
100 const std::vector<std::string> &tensor_names) {
101 std::vector<MSTensor> ms_tensors;
102 for (size_t i = 0; i < tensor_ptrs.size(); i++) {
103 auto graph_tensor = tensor_ptrs[i];
104 std::string graph_tensor_name = tensor_names[i];
105 graph_tensor->set_name(graph_tensor_name);
106 auto tensor_impl = std::make_shared<TensorTensorImpl>(graph_tensor);
107 ms_tensors.push_back(MSTensor(tensor_impl));
108 }
109 return ms_tensors;
110 }
111
MSTensorToTensor(const std::vector<MSTensor> & ms_tensors)112 std::vector<mindspore::tensor::Tensor> TensorUtils::MSTensorToTensor(const std::vector<MSTensor> &ms_tensors) {
113 std::vector<mindspore::tensor::Tensor> tensors;
114 for (auto ms_tensor : ms_tensors) {
115 auto data_type = ms_tensor.DataType();
116 auto type_id = static_cast<mindspore::TypeId>(data_type);
117 auto shape = ms_tensor.Shape();
118 auto data = const_cast<void *>(ms_tensor.Data().get());
119 auto data_size = ms_tensor.DataSize();
120 auto ref_tensor_data = std::make_shared<TensorRefData>(data, ms_tensor.ElementNum(), data_size, shape.size());
121 mindspore::tensor::Tensor tensor(type_id, shape, ref_tensor_data);
122 auto device_address = ms_tensor.GetDeviceData();
123 if (device_address != nullptr) {
124 auto lite_device_address = std::make_shared<LiteDeviceAddress>(device_address, ms_tensor.DataSize());
125 tensor.set_device_address(lite_device_address);
126 // only use device_id now.
127 auto device_info = tensor::DeviceInfo("DefaultFormat", nullptr, "DefaultFormat", ms_tensor.GetDeviceId());
128 tensor.set_device_info(device_info);
129 }
130 tensors.emplace_back(std::move(tensor));
131 }
132 return tensors;
133 }
134
TensorToMSTensor(std::vector<mindspore::tensor::Tensor> tensors,const std::vector<std::string> & tensor_names)135 std::vector<MSTensor> TensorUtils::TensorToMSTensor(std::vector<mindspore::tensor::Tensor> tensors,
136 const std::vector<std::string> &tensor_names) {
137 std::vector<MSTensor> ms_tensors;
138 for (size_t i = 0; i < tensors.size(); i++) {
139 auto &graph_tensor = tensors[i];
140 std::string graph_tensor_name = tensor_names[i];
141 graph_tensor.set_name(graph_tensor_name);
142 auto tensor_impl = std::make_shared<TensorTensorImpl>(graph_tensor);
143 ms_tensors.emplace_back(MSTensor(tensor_impl));
144 }
145 return ms_tensors;
146 }
147
TensorToTensorPtr(const std::vector<mindspore::tensor::Tensor> & tensors)148 std::vector<mindspore::tensor::TensorPtr> TensorUtils::TensorToTensorPtr(
149 const std::vector<mindspore::tensor::Tensor> &tensors) {
150 std::vector<mindspore::tensor::TensorPtr> tensor_ptrs;
151 for (auto &tensor : tensors) {
152 auto type_id = static_cast<TypeId>(tensor.data_type_c());
153 auto shape = tensor.shape_c();
154 auto data = tensor.data_c();
155 auto data_size = tensor.Size();
156 auto tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(type_id, shape, data, data_size);
157 tensor_ptrs.push_back(tensor_ptr);
158 }
159 return tensor_ptrs;
160 }
161
TensorPtrToTensor(const std::vector<mindspore::tensor::TensorPtr> & tensor_ptrs)162 std::vector<mindspore::tensor::Tensor> TensorUtils::TensorPtrToTensor(
163 const std::vector<mindspore::tensor::TensorPtr> &tensor_ptrs) {
164 std::vector<mindspore::tensor::Tensor> tensors;
165 std::transform(tensor_ptrs.begin(), tensor_ptrs.end(), std::back_inserter(tensors),
166 [](mindspore::tensor::TensorPtr tensor_ptr) { return mindspore::tensor::Tensor(*tensor_ptr); });
167 return tensors;
168 }
169
LiteTensorToAddressPtr(const lite::Tensor * lite_tensor)170 kernel::AddressPtr CloudTensorUtils::LiteTensorToAddressPtr(const lite::Tensor *lite_tensor) {
171 kernel::AddressPtr address_ptr = std::make_shared<kernel::Address>(lite_tensor->data(), lite_tensor->Size());
172 return address_ptr;
173 }
174
LiteTensorToAddressPtrVec(const std::vector<lite::Tensor * > & lite_tensors)175 std::vector<mindspore::kernel::AddressPtr> CloudTensorUtils::LiteTensorToAddressPtrVec(
176 const std::vector<lite::Tensor *> &lite_tensors) {
177 kernel::AddressPtrList address_list;
178
179 for (auto lite_tensor : lite_tensors) {
180 kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
181 address_list.push_back(address);
182 }
183
184 return address_list;
185 }
186
LiteTensorToKernelTensorPtr(const lite::Tensor * lite_tensor)187 kernel::KernelTensor *CloudTensorUtils::LiteTensorToKernelTensorPtr(const lite::Tensor *lite_tensor) {
188 kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
189 kernel::KernelTensor *kernel_tensor_ptr = new (std::nothrow) kernel::KernelTensor();
190 if (kernel_tensor_ptr == nullptr) {
191 return kernel_tensor_ptr;
192 }
193 kernel_tensor_ptr->SetData(address);
194 kernel_tensor_ptr->set_format(lite_tensor->format());
195 kernel_tensor_ptr->SetType(std::make_shared<TensorType>(TypeIdToType(lite_tensor->data_type())));
196
197 auto lite_shape = lite_tensor->shape();
198 std::vector<int64_t> shape;
199 for (size_t i = 0; i < lite_shape.size(); i++) {
200 shape.push_back(lite_shape[i]);
201 }
202 kernel_tensor_ptr->SetShape(std::make_shared<abstract::TensorShape>(std::move(shape)));
203 return kernel_tensor_ptr;
204 }
205
LiteTensorToKernelTensorPtrVec(const std::vector<lite::Tensor * > & lite_tensors)206 std::vector<kernel::KernelTensor *> CloudTensorUtils::LiteTensorToKernelTensorPtrVec(
207 const std::vector<lite::Tensor *> &lite_tensors) {
208 std::vector<kernel::KernelTensor *> kernel_tensor_list;
209
210 for (auto lite_tensor : lite_tensors) {
211 if (lite_tensor == nullptr) {
212 continue;
213 }
214 auto kernel_tensor_ptr = LiteTensorToKernelTensorPtr(lite_tensor);
215 kernel_tensor_list.push_back(kernel_tensor_ptr);
216 }
217
218 return kernel_tensor_list;
219 }
220
GetTensorListShapes(const std::vector<infer::abstract::Tensor * > & tensors)221 std::vector<std::vector<int64_t>> AbstractTensorUtils::GetTensorListShapes(
222 const std::vector<infer::abstract::Tensor *> &tensors) {
223 std::vector<std::vector<int64_t>> original_dims;
224 std::transform(tensors.begin(), tensors.end(), std::back_inserter(original_dims),
225 [](infer::abstract::Tensor *tensor) {
226 std::vector<int64_t> shape64;
227 if (tensor != nullptr) {
228 auto shape32 = tensor->shape();
229 std::transform(shape32.begin(), shape32.end(), std::back_inserter(shape64),
230 [](int dim) { return static_cast<int64_t>(dim); });
231 }
232 return shape64;
233 });
234 return original_dims;
235 }
236
SetTensorListShapse(const std::vector<infer::abstract::Tensor * > & tensors,const std::vector<std::vector<int64_t>> & shapes)237 bool AbstractTensorUtils::SetTensorListShapse(const std::vector<infer::abstract::Tensor *> &tensors,
238 const std::vector<std::vector<int64_t>> &shapes) {
239 for (size_t i = 0; i < tensors.size(); i++) {
240 auto tensor = tensors.at(i);
241 if (tensor == nullptr) {
242 continue;
243 }
244 auto shape64 = shapes.at(i);
245 std::vector<int> shape32;
246 std::transform(shape64.begin(), shape64.end(), std::back_inserter(shape32),
247 [](int64_t dim) { return static_cast<int>(dim); });
248 tensor->set_shape(shape32);
249 }
250 return true;
251 }
252 } // namespace mindspore
253