1 /**
2  * Copyright 2021-2022 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #include "runtime/device/tensor_array.h"
17 
18 namespace mindspore {
19 namespace device {
CheckValue(const TypeId & dtype,const ShapeVector & shape)20 bool TensorArray::CheckValue(const TypeId &dtype, const ShapeVector &shape) {
21   MS_LOG(DEBUG) << "Check the data shape and type for " << name_;
22   MS_EXCEPTION_IF_NULL(dtype_);
23   if (dtype != dtype_->type_id()) {
24     MS_LOG(ERROR) << "Invalid data type " << TypeIdLabel(dtype) << " for " << name_ << ", the origin type is "
25                   << TypeIdLabel(dtype_->type_id());
26     return false;
27   }
28   if (shape != shapes_) {
29     MS_LOG(ERROR) << "Invalid data shape " << shape << " for " << name_ << ", the origin shape is " << shapes_;
30     return false;
31   }
32   return true;
33 }
34 
CheckReadIndexLogical(const int64_t index)35 bool TensorArray::CheckReadIndexLogical(const int64_t index) {
36   if (LongToSize(index) >= valid_size_) {
37     MS_LOG(ERROR) << "Index " << index << " out of range " << valid_size_ << ", " << name_;
38     return false;
39   }
40   return true;
41 }
42 
43 // Function Read() can get the tensors in the scope of tensors_.
Read(const int64_t index)44 mindspore::kernel::AddressPtr TensorArray::Read(const int64_t index) {
45   if (LongToSize(index) >= tensors_.size()) {
46     MS_LOG(EXCEPTION) << "Index " << index << " out of range " << tensors_.size() << ", " << name_;
47   }
48   MS_LOG(DEBUG) << "Read tensor index = " << index << ", addr = " << tensors_[LongToSize(index)]->addr;
49   return tensors_[LongToSize(index)];
50 }
51 
52 // Add tensor to the TensorArray and increase the size.
53 // Cast 1: is_dynamic = False and index > max_size_, error.
54 // Case 2: index > valid_size, fill the rest dev_value with zeros, and set valid_size to index + 1.
55 // Case 3: index == tensors_.size(), we need to increase both real tensors_ size and valid size, and add
56 // the new dev_value to tensors_.
57 // Case 4: tensors_size() > index > valid_size, we can reuse the memory in tensors_[index], so
58 // only increase the valid_size.
Write(const int64_t index,const mindspore::kernel::AddressPtr & dev_value)59 bool TensorArray::Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) {
60   MS_LOG(DEBUG) << "Write dev_value to " << name_;
61   if (!is_dynamic_ && (index >= max_size_)) {
62     MS_LOG(ERROR) << name_ << " is not in dynamic size, the max_size is " << max_size_ << ", but get index " << index;
63     return false;
64   }
65   if (LongToSize(index) > valid_size_) {
66     // Create/reuse (index - valid_size) size dev_value with zeros.
67     // 1 create new mem : index > real_size ? index - real_size : 0
68     // 2 reuse old mem : index > real_size ? real_size - valid_size : index - valid_size
69     // 3 fill zeros : index - valid_size
70     MS_EXCEPTION_IF_NULL(dev_value);
71     size_t create_size = (LongToSize(index) > tensors_.size()) ? (LongToSize(index) - tensors_.size()) : 0;
72     for (size_t i = 0; i < create_size; i++) {
73       kernel::AddressPtr create_dev = std::make_shared<kernel::Address>();
74       create_dev->addr = AllocateMemory(dev_value->size);
75       create_dev->size = dev_value->size;
76       tensors_.push_back(create_dev);
77     }
78     tensors_.push_back(dev_value);
79     for (size_t i = valid_size_; i < LongToSize(index); i++) {
80       MS_EXCEPTION_IF_CHECK_FAIL((tensors_.size() > i), "The index is out of range.");
81       MS_EXCEPTION_IF_NULL(tensors_[i]);
82       auto tensor_size = tensors_[i]->size;
83       ClearMemory(tensors_[i]->addr, tensor_size);
84     }
85     valid_size_ = LongToSize(index) + 1;
86   } else if (LongToSize(index) == tensors_.size()) {
87     MS_LOG(DEBUG) << "Write to index " << index << ", increase tensors' size to " << (tensors_.size() + 1);
88     tensors_.push_back(dev_value);
89     valid_size_++;
90   } else {
91     MS_LOG(DEBUG) << "Reuse tensors in position " << index << ", tensors size is " << tensors_.size();
92     if (LongToSize(index) == valid_size_) {
93       valid_size_++;
94     }
95   }
96   return true;
97 }
98 
Clear()99 void TensorArray::Clear() {
100   valid_size_ = 0;
101   return;
102 }
103 
Free()104 void TensorArray::Free() {
105   MS_LOG(DEBUG) << "Free device memory for " << name_;
106   for (const auto &addr : tensors_) {
107     if (addr != nullptr) {
108       FreeMemory(static_cast<DeviceMemPtr>(addr->addr));
109     }
110   }
111 }
112 
GetValidSize() const113 size_t TensorArray::GetValidSize() const { return valid_size_; }
GetRealSize() const114 size_t TensorArray::GetRealSize() const { return tensors_.size(); }
115 
GetTensorAddr(const size_t & index) const116 const void *TensorArray::GetTensorAddr(const size_t &index) const {
117   MS_EXCEPTION_IF_CHECK_FAIL((tensors_.size() > index), "The index is out of range.");
118   MS_EXCEPTION_IF_NULL(tensors_[index]);
119   return tensors_[index]->addr;
120 }
121 
SetMaxSize(const int64_t size,const bool is_dynamic)122 void TensorArray::SetMaxSize(const int64_t size, const bool is_dynamic) {
123   is_dynamic_ = is_dynamic;
124   if (!is_dynamic_) {
125     max_size_ = size;
126     MS_LOG(DEBUG) << name_ << " use fixed size " << max_size_;
127   }
128   return;
129 }
130 }  // namespace device
131 }  // namespace mindspore
132