/**
 * Copyright 2024 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ir/base_tensor.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <exception>
#include <iomanip>
#include <functional>
#include <iterator>
#include <memory>
#include <sstream>
#include <utility>
#include <map>
#include <vector>
#include "mindapi/base/type_id.h"
#include "abstract/utils.h"
#include "abstract/abstract_value.h"
#include "base/complex_storage.h"
#include "utils/log_adapter.h"
#include "mindspore/ccsrc/include/common/utils/convert_utils.h"
#include "utils/shape_utils.h"
#include "utils/temp_file_manager.h"

namespace mindspore {
namespace tensor {
static std::string MakeId() {
  // Use an atomic counter to make the id generator thread safe.
  static std::atomic<uint64_t> last_id{1};
  return "T" + std::to_string(last_id.fetch_add(1, std::memory_order_relaxed));
}
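
// Illustrative note (editorial, not in the original source): successive
// MakeId() calls return "T1", "T2", "T3", ..., so every tensor constructed in
// this process receives a distinct id string.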

static TypeId TypeIdOf(const TypePtr &data_type, TypeId defaultTypeId) {
  return data_type ? data_type->type_id() : defaultTypeId;
}

std::string ShapeToString(const ShapeVector &shape) {
  std::string str = "[";
  const size_t count = shape.size();
  for (size_t i = 0; i < count; ++i) {
    if (i > 0) {
      str.append(", ");
    }
    str.append(std::to_string(shape[i]));
  }
  return str.append("]");
}
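
// Example (editorial): ShapeToString({2, 3}) returns "[2, 3]", and
// ShapeToString({}) returns "[]" for a rank-0 (scalar) tensor.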

// Tensor chunk data.
template <typename T>
class TensorChunkData : public TensorDataImpl<T> {
 public:
  explicit TensorChunkData(size_t size) : TensorDataImpl<T>(ShapeVector{static_cast<int64_t>(size)}) {}

  ~TensorChunkData() override = default;

  bool has_sub_data() const override { return true; }
};

// Tensor compression data.
template <typename T>
class CompressionTensorData : public TensorDataImpl<T> {
 public:
  explicit CompressionTensorData(size_t size) : TensorDataImpl<T>(ShapeVector{static_cast<int64_t>(size)}) {}

  ~CompressionTensorData() override = default;
};
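
// Note (editorial): both helpers wrap a flat 1-D buffer of `size` elements.
// TensorChunkData additionally reports has_sub_data() == true, marking it as
// the owner of sub-data chunks. CompressionTensorData is always created with
// kNumberTypeInt8 (see the compression constructor below), so its size is
// effectively a byte count.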

BaseTensor::BaseTensor(const BaseTensor &tensor)
    : MetaTensor(tensor),
      is_forward_output_(tensor.is_forward_output_),
      need_pipeline_sync_(tensor.need_pipeline_sync_),
      id_(tensor.id_),
      device_sync_(tensor.device_sync_),
      sync_status_(tensor.sync_status_),
      auto_grad_meta_data_(tensor.auto_grad_meta_data_),
      data_(tensor.data_),
      base_shape_ptr_(tensor.base_shape_ptr_),
      contiguous_callback_(tensor.contiguous_callback_) {
  user_data_ = tensor.user_data_;
}

BaseTensor::BaseTensor(const BaseTensor &tensor, TypeId data_type)
    : MetaTensor(data_type, tensor.shape_),
      is_forward_output_(tensor.is_forward_output_),
      need_pipeline_sync_(tensor.need_pipeline_sync_),
      id_(tensor.data_type_ != data_type ? MakeId() : tensor.id_),
      device_sync_(tensor.device_sync_),
      sync_status_(tensor.sync_status_),
      auto_grad_meta_data_(tensor.auto_grad_meta_data_),
      data_(MakeTensorData(data_type, tensor.shape_, tensor.data_->data(), tensor.data_type_)),
      base_shape_ptr_(tensor.base_shape_ptr_),
      contiguous_callback_(tensor.contiguous_callback_) {
  user_data_ = tensor.user_data_;
}

BaseTensor &BaseTensor::operator=(const BaseTensor &tensor) {
  if (this == &tensor) {
    return *this;
  }
  is_forward_output_ = tensor.is_forward_output_;
  data_ = tensor.data_;
  id_ = tensor.id_;
  sync_status_ = tensor.sync_status_;
  device_sync_ = tensor.device_sync_;
  need_pipeline_sync_ = tensor.need_pipeline_sync_;
  lazy_callback_ = tensor.lazy_callback_;
  contiguous_callback_ = tensor.contiguous_callback_;
  user_data_ = tensor.user_data_;
  base_shape_ptr_ = tensor.base_shape_ptr_;
  auto_grad_meta_data_ = tensor.auto_grad_meta_data_;
  return *this;
}

BaseTensor::BaseTensor(TypeId data_type, const ShapeVector &shape, TensorDataPtr data)
    : MetaTensor(data_type, shape), id_(MakeId()), data_(std::move(data)) {}

BaseTensor::BaseTensor(TypeId data_type, const ShapeVector &shape)
    : BaseTensor(data_type, shape, MakeTensorData(data_type, shape)) {}

BaseTensor::BaseTensor(TypeId data_type, const ShapeVector &shape, void *data, size_t data_len)
    : BaseTensor(data_type, shape, MakeTensorData(data_type, shape, data, data_len)) {}

BaseTensor::BaseTensor(TypeId data_type, const ShapeVector &shape, void *data, TypeId src_data_type)
    : BaseTensor(data_type, shape, MakeTensorData(data_type, shape, data, src_data_type)) {}

BaseTensor::BaseTensor(const std::vector<int64_t> &input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt64), {static_cast<int>(input.size())}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, shape_, input.data(), input.size())) {}

BaseTensor::BaseTensor(const std::vector<int32_t> &input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {static_cast<int>(input.size())}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, shape_, input.data(), input.size())) {}

BaseTensor::BaseTensor(const std::vector<double> &input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {static_cast<int>(input.size())}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, shape_, input.data(), input.size())) {}

BaseTensor::BaseTensor(const std::vector<float> &input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {static_cast<int>(input.size())}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, shape_, input.data(), input.size())) {}
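
// Example (editorial): constructing from a host vector infers a 1-D shape and,
// when data_type is null, falls back to the default dtype named in the
// constructor, e.g.
//   BaseTensor t(std::vector<float>{1.0f, 2.0f, 3.0f}, nullptr);
// yields a Float32 tensor of shape [3].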

BaseTensor::BaseTensor(int64_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt64), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(int32_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt32), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(int16_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt16), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(int8_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeInt8), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(double input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(float input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat32), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(float16 input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeFloat16), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}
#ifndef KERNEL_EXECUTOR_ANDROID
BaseTensor::BaseTensor(bfloat16 input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeBFloat16), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}
#endif
BaseTensor::BaseTensor(uint64_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeUInt64), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(uint32_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeUInt32), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(uint16_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeUInt16), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(uint8_t input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeUInt8), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}

BaseTensor::BaseTensor(bool input, const TypePtr &data_type)
    : MetaTensor(TypeIdOf(data_type, kNumberTypeBool), {}),
      id_(MakeId()),
      data_(MakeTensorData(data_type_, ShapeVector{}, input)) {}
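
// Note (editorial): each scalar constructor above produces a rank-0 tensor
// (shape []), and the C++ argument type selects the default dtype when
// data_type is null. Note that a C++ double defaults to Float32, not Float64.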

BaseTensor::BaseTensor(TypeId data_type, size_t data_size)
    : BaseTensor(data_type, ShapeVector{static_cast<int64_t>(data_size)},
                 MakeTensorData<TensorChunkData>(data_type, data_size)) {}

BaseTensor::BaseTensor(TypeId origin_data_type, const ShapeVector &shape, size_t compression_data_size,
                       TensorCompressionType compression_type)
    : BaseTensor(origin_data_type, shape,
                 MakeTensorData<CompressionTensorData>(kNumberTypeInt8, compression_data_size)) {}

bool BaseTensor::operator==(const BaseTensor &tensor) const {
  return (&tensor == this || (MetaTensor::operator==(tensor) && data_ == tensor.data_));
}

bool BaseTensor::ValueEqual(const BaseTensor &tensor) const {
  if (is_parameter_ != tensor.is_parameter_) {
    return false;
  }
  if (is_parameter_ && param_info_->name() != tensor.param_info_->name()) {
    return false;
  }
  return (&tensor == this || (MetaTensor::operator==(tensor) && data_->equals(*tensor.data_)));
}
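
// Note (editorial): operator== requires the two tensors to share the same
// TensorData object (pointer identity on data_), whereas ValueEqual compares
// the stored elements via TensorData::equals. Two independently allocated
// tensors with identical contents are therefore ValueEqual but not operator==.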

void BaseTensor::ExecuteLazyTask() const {
  if (lazy_callback_ != nullptr && need_pipeline_sync_) {
    lazy_callback_();
  }

  if (contiguous_callback_ != nullptr && storage_info() != nullptr) {
    device_sync_ = contiguous_callback_(device_address());
    device_sync_->set_original_ref_count(SIZE_MAX);
    device_sync_->ResetRefCount();
  }
}

DeviceSyncPtr BaseTensor::device_address() const { return device_sync_; }

const TensorStorageInfoPtr BaseTensor::storage_info() const {
  if (device_sync_ == nullptr) {
    return nullptr;
  }

  return device_sync_->GetTensorStorageInfo();
}

bool BaseTensor::is_contiguous() const {
  const auto &storage = storage_info();
  return storage == nullptr || storage->is_contiguous;
}

std::vector<int64_t> BaseTensor::stride() const {
  const auto &storage = storage_info();
  if (storage != nullptr) {
    return storage->strides;
  }

  if (shape_.empty()) {
    return {};
  }
  std::vector<int64_t> ret(shape_.size(), 1);
  int64_t stride = 1;
  for (size_t i = shape_.size() - 1; i > 0; --i) {
    stride *= shape_[i];
    ret[i - 1] = stride;
  }
  return ret;
}
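
// Worked example (editorial): with no storage_info, strides default to
// row-major layout. For shape [2, 3, 4] the loop produces [12, 4, 1]:
// moving one step along axis 0 skips 3 * 4 = 12 elements, along axis 1
// skips 4 elements, and along axis 2 skips 1.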

const int64_t BaseTensor::storage_offset() const {
  const auto &storage = storage_info();
  return storage == nullptr ? 0 : SizeToLong(storage->storage_offset);
}

void BaseTensor::set_device_address(const DeviceSyncPtr &device_sync, bool need_update_ref_count) {
  device_sync_ = device_sync;
  // To support coexistence of the old and new runtime, the output of the old runtime may be the input of the new
  // runtime. In that scenario the device address must not be released through the ref count, so set the max ref
  // count here.
  if (need_update_ref_count && (device_sync_ != nullptr)) {
    device_sync_->set_original_ref_count(SIZE_MAX);
    device_sync_->ResetRefCount();
  }
}

BaseTensor &BaseTensor::AssignValue(const BaseTensor &tensor) {
  if (this != &tensor) {
    ExecuteLazyTask();
    contiguous_callback_ = tensor.contiguous_callback_;
    MetaTensor::operator=(tensor);
    device_sync_ = tensor.device_address();
    need_pipeline_sync_ = tensor.need_pipeline_sync_;
    is_forward_output_ = tensor.is_forward_output_;
    sync_status_ = tensor.sync_status_;
    MS_EXCEPTION_IF_NULL(data_);
    if (data_->is_sub_data()) {
      // If tensor data is sub data, keep the data memory address unchanged and copy the data into it.
      CopyTensorData(data_, tensor.data_);
    } else {
      data_ = tensor.data_;
    }
    if (!is_parameter_) {
      id_ = tensor.id_;
      auto_grad_meta_data_ = tensor.auto_grad_meta_data_;
    }
  }
  return *this;
}

abstract::AbstractBasePtr BaseTensor::ToAbstract() {
  auto tens = shared_from_base<BaseTensor>();
  auto dtype = tens->Dtype();
  if (!IsSubType(dtype, kNumber) && !IsSubType(dtype, kString) && !IsSubType(dtype, kTensorType)) {
    MS_LOG(EXCEPTION) << "Expect tensor type kNumber or kString or kTensor but got: " << dtype->ToString() << ".";
  }
  abstract::AbstractTensorPtr abs_tensor = nullptr;
  if (base_shape_ptr_ == nullptr) {
    auto tensor_shape = tens->shape();
    abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, tensor_shape);
  } else {
    abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, base_shape_ptr_);
  }
  // A parameter never carries a value; wrap it as a ref tensor keyed by its name instead.
  if (is_parameter_) {
    auto param_name = param_info_->name();
    auto ref_key = std::make_shared<RefKey>(param_name);
    abs_tensor = std::make_shared<abstract::AbstractRefTensor>(abs_tensor, ref_key);
  } else {
    abs_tensor->set_value(shared_from_base<BaseTensor>());
  }
  return abs_tensor;
}

abstract::AbstractBasePtr BaseTensor::GetAbstractCache() {
  auto abs = abstract_.lock();
  if (abs != nullptr) {
    MS_LOG(DEBUG) << "Get cached abstract " << abs->ToString() << " real tensor shape is " << shape_;
    return abs;
  }
  return ToAbstract();
}

std::string BaseTensor::GetShapeAndDataTypeInfo() const {
  std::ostringstream buf;
  buf << "Tensor shape:[" << shape() << "]" << this->Dtype()->ToString();
  return buf.str();
}

std::string BaseTensor::ToStringInternal(size_t limit_size) const {
  std::ostringstream buf;
  auto dtype = Dtype();
  MS_EXCEPTION_IF_NULL(dtype);
  buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ", value=";
  if (limit_size == 0 || DataSize() < limit_size) {
    // Only print data for small tensors.
    buf << ((data().ndim() > 1) ? "\n" : "") << data().ToString(data_type_, shape_, false);
  } else {
    buf << "[...]";
  }
  if (is_parameter_) {
    buf << ", name=" << param_info_->name();
  }
  buf << ")";
  return buf.str();
}

std::string BaseTensor::ToString() const {
  constexpr size_t small_tensor_size = 30;
  return ToStringInternal(small_tensor_size);
}
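
// Example (editorial): for a small tensor the result looks like
//   Tensor(shape=[3], dtype=Float32, value=[1.0, 2.0, 3.0])
// while tensors with 30 or more elements print their value as "[...]";
// ToStringNoLimit() below always prints the full data.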

std::string BaseTensor::ToStringNoLimit() const { return ToStringInternal(0); }

std::string BaseTensor::ToStringRepr() const {
  std::ostringstream buf;
  auto dtype = Dtype();
  MS_EXCEPTION_IF_NULL(dtype);
  buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString()
      << ", value=" << ((data().ndim() > 1) ? '\n' : ' ') << data().ToString(data_type_, shape_, true) << ')';
  return buf.str();
}

void BaseTensor::data_sync(bool need_wait) const {
  if (need_wait) {
    device_sync_ = device_address();
    ExecuteLazyTask();
  }
  if (device_sync_ == nullptr) {
    return;
  }
  MS_EXCEPTION_IF_NULL(data_);
  if (data_->is_sub_data()) {
    return;
  }

  std::vector<size_t> shape_tmp;
  (void)std::transform(shape().begin(), shape().end(), std::back_inserter(shape_tmp), LongToSize);
  auto size = abstract::ShapeSize(shape_tmp) * abstract::TypeIdSize(data_type());
  auto address = device_sync_;
  if (size != 0 && !address->SyncDeviceToHost(shape(), size, data_type(), data_c())) {
    MS_LOG(INTERNAL_EXCEPTION) << "SyncDeviceToHost failed.";
  }
  if (!data_->file_path().empty()) {
    device_sync_ = nullptr;
  }
  sync_status_ = kNeedSyncHostToDevice;
}
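
// Usage sketch (editorial, assuming typical call sites): callers invoke
// data_sync() before reading host memory through data_c(), e.g.
//   tensor.data_sync();                // flush pending work, copy device -> host
//   const void *host = tensor.data_c();  // host buffer is now up to date
// Afterwards sync_status_ is kNeedSyncHostToDevice, i.e. the host copy is
// authoritative until it is written back to the device.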

TypeId BaseTensor::set_data_type(TypeId data_type) {
  if (data_type != data_type_) {
    MS_EXCEPTION_IF_NULL(data_);
    data_ = MakeTensorData(data_type, shape_, data_->data(), data_type_);
    return MetaTensor::set_data_type(data_type);
  }
  return data_type;
}

size_t BaseTensor::set_shape(const ShapeVector &shape) {
  abstract_.reset();
  if (DataSize() != SizeOf(shape)) {
    data_ = MakeTensorData(data_type_, shape);
  }
  return MetaTensor::set_shape(shape);
}
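
// Note (editorial): set_shape reallocates the backing buffer whenever the new
// element count differs from the old one, so the previous contents are not
// preserved in that case; a same-size reshape keeps the existing data.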
}  // namespace tensor
}  // namespace mindspore