/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"

namespace tensorflow {

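// A ScopedAllocator hands out non-overlapping slices of one
// pre-allocated backing tensor. Each Field describes a slice by its
// byte offset and length within that tensor; the CHECK in the body
// implicitly assumes the fields are ordered so that the last field
// ends furthest into the buffer.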
ScopedAllocator::ScopedAllocator(const Tensor& backing_tensor, int32 scope_id,
                                 const string& name,
                                 const gtl::ArraySlice<Field>& fields,
                                 int32 expected_call_count,
                                 ScopedAllocatorContainer* container)
    : backing_tensor_(backing_tensor),
      tbuf_(backing_tensor_.buf_),
      id_(scope_id),
      name_(name),
      container_(container),
      fields_(fields.begin(), fields.end()),
      expected_call_count_(expected_call_count),
      live_alloc_count_(0) {
  // Hold this until all aliases have been deallocated.
  tbuf_->Ref();
  // Hold this until all expected_calls have been made.
  container->Ref();
  CHECK_GE(tbuf_->size(), fields.back().offset + fields.back().bytes);
}

ScopedAllocator::~ScopedAllocator() {
  mutex_lock l(mu_);
  VLOG(1) << "~ScopedAllocator " << this << " tbuf_ " << tbuf_ << " data "
          << static_cast<void*>(tbuf_->data());
  // Unless graph execution was incomplete (interrupted by an error
  // status, or by a control flow branch crossing the ScopedAllocation
  // region), we expect expected_call_count_ == 0 at exit.
  if (VLOG_IS_ON(1)) {
    if (expected_call_count_ > 0)
      VLOG(1) << "expected_call_count_ = " << expected_call_count_
              << " at deallocation";
  }
  if (tbuf_) tbuf_->Unref();
}

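// Returns the pre-computed slice of the backing buffer reserved for
// `field_index`. Each field is expected to be allocated exactly once,
// and `num_bytes` must match the field's pre-calculated size. Once
// expected_call_count_ reaches zero, this allocator removes itself
// from the parent container and releases its reference to it.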
void* ScopedAllocator::AllocateRaw(int32 field_index, size_t num_bytes) {
  VLOG(1) << "ScopedAllocator index " << id_ << " AllocateRaw "
          << "field " << field_index << " num_bytes " << num_bytes;
  mutex_lock l(mu_);
  if (expected_call_count_ <= 0) {
    LOG(ERROR) << "Scoped allocator " << name_
               << " could not satisfy request for " << num_bytes
               << " bytes, expected uses exhausted.";
    return nullptr;
  }

  int32 num_fields = static_cast<int32>(fields_.size());
  if (field_index >= num_fields) {
    LOG(ERROR) << "ScopedAllocator " << name_
               << " received unexpected field number " << field_index;
    return nullptr;
  }

  const Field& f = fields_[field_index];
  if (num_bytes != f.bytes) {
    LOG(ERROR) << "ScopedAllocator " << name_ << " got request for "
               << num_bytes << " bytes from field " << field_index
               << " which has precalculated size " << f.bytes << " and offset "
               << f.offset;
    return nullptr;
  }

  void* ptr = static_cast<void*>(tbuf_->template base<char>() + f.offset);

  ++live_alloc_count_;
  --expected_call_count_;
  if (0 == expected_call_count_) {
    // All expected allocations have now been made: unregister every
    // field's scope id (and this allocator's own id) from the container
    // and release the reference taken in the constructor.
    for (auto& field : fields_) {
      container_->Drop(field.scope_id, this);
    }
    container_->Drop(id_, this);
    container_->Unref();
    container_ = nullptr;
  }
  VLOG(1) << "AllocateRaw returning " << ptr;
  return ptr;
}

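// Deallocation never returns memory; it only does bookkeeping. The
// allocator deletes itself once every expected allocation has both
// been made (expected_call_count_ == 0) and been returned
// (live_alloc_count_ == 0). The backing buffer is unreffed in the
// destructor.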
void ScopedAllocator::DeallocateRaw(void* p) {
  CHECK(VerifyPointer(p));

  bool dead = false;
  {
    mutex_lock l(mu_);
    CHECK_GT(live_alloc_count_, 0);
    if (0 == --live_alloc_count_) {
      if (0 == expected_call_count_) {
        dead = true;
      }
    }
  }
  if (dead) {
    delete this;
  }
}

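// A pointer is valid only if it is exactly the start of one of the
// pre-computed fields; interior pointers into a slice are rejected.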
bool ScopedAllocator::VerifyPointer(const void* p) {
  void* base = tbuf_->data();
  CHECK_GE(p, base);
  for (auto& f : fields_) {
    void* f_ptr = static_cast<void*>(static_cast<char*>(base) + f.offset);
    if (f_ptr == p) {
      return true;
    }
  }
  VLOG(1) << "ScopedAllocator index " << id_ << " VerifyPointer for p=" << p
          << " failed.";
  return false;
}

bool ScopedAllocator::VerifyTensor(const Tensor* t) {
  return VerifyPointer(t->buf_->data());
}

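// A ScopedAllocatorInstance adapts a single field of a ScopedAllocator
// to the Allocator interface, so a Tensor can be allocated directly
// into that field's slice. Each instance is single-use: one AllocateRaw
// followed by one DeallocateRaw.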
ScopedAllocatorInstance::ScopedAllocatorInstance(ScopedAllocator* sa,
                                                 int32 field_index)
    : scoped_allocator_(sa),
      field_index_(field_index),
      allocated_(false),
      deallocated_(false),
      in_table_(true) {
  VLOG(1) << "new ScopedAllocatorInstance " << this << " on SA " << sa
          << " field_index " << field_index;
}

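// Ownership of an instance is effectively shared between the
// container's table and the in-flight allocation: whichever of
// DropFromTable and DeallocateRaw finishes second deletes the instance.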
void ScopedAllocatorInstance::DropFromTable() {
  bool del = false;
  {
    mutex_lock l(mu_);
    CHECK(in_table_);
    in_table_ = false;
    VLOG(2) << "ScopedAllocatorInstance::DropFromTable " << this
            << " allocated_ " << allocated_ << " deallocated_ " << deallocated_
            << " in_table_ " << in_table_;
    // Single use is complete once the tensor slice has been both
    // allocated and deallocated. This check prevents a race between
    // allocating the tensor slice and dropping it from the parent
    // container's table.
    if (allocated_ && deallocated_) {
      del = true;
    }
  }
  if (del) delete this;
}

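// Delegates to the underlying ScopedAllocator. The `alignment` argument
// is ignored here; presumably the field offsets were computed with the
// required alignment already in mind (see ScopedAllocatorMgr).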
void* ScopedAllocatorInstance::AllocateRaw(size_t alignment, size_t num_bytes) {
  void* ptr = scoped_allocator_->AllocateRaw(field_index_, num_bytes);
  {
    mutex_lock l(mu_);
    if (nullptr == ptr) {
      VLOG(2) << "ScopedAllocatorInstance::AllocateRaw " << this
              << " call to underlying ScopedAllocator unsuccessful,"
              << " allocated_ " << allocated_ << " deallocated_ "
              << deallocated_ << " in_table_ " << in_table_
              << " returning nullptr.";
    } else {
      allocated_ = true;
      VLOG(2) << "ScopedAllocatorInstance::AllocateRaw " << this
              << " allocated_ " << allocated_ << " deallocated_ "
              << deallocated_ << " in_table_ " << in_table_
              << " returning ptr = " << ptr;
    }
  }
  return ptr;
}

void ScopedAllocatorInstance::DeallocateRaw(void* p) {
  scoped_allocator_->DeallocateRaw(p);
  bool del = false;
  {
    mutex_lock l(mu_);
    CHECK(allocated_);
    deallocated_ = true;
    VLOG(2) << "ScopedAllocatorInstance::DeallocateRaw " << this
            << " allocated_ " << allocated_ << " deallocated_ " << deallocated_
            << " in_table_ " << in_table_;
    // Single use is now complete, but only delete this instance when it is
    // no longer in a ScopedAllocatorContainer's table.
    if (!in_table_) {
      del = true;
    }
  }
  if (del) delete this;
}

string ScopedAllocatorInstance::Name() {
  return strings::StrCat(scoped_allocator_->name(), "_field_", field_index_);
}

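// Illustrative usage sketch (a simplified, hypothetical assumption; in
// practice the _ScopedAllocator / _ScopedAllocatorConcat ops and
// ScopedAllocatorMgr do this wiring):
//
//   // Fetch the single-use instance registered for one field's scope
//   // id, then allocate a Tensor whose buffer aliases that field's
//   // slice of the backing tensor.
//   ScopedAllocatorInstance* sai = container->GetInstance(scope_id);
//   Tensor slice(sai, dtype, shape);  // calls sai->AllocateRaw()
//
//   // When the last reference to `slice` goes away, DeallocateRaw runs
//   // and the instance deletes itself once DropFromTable has also been
//   // called.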
}  // namespace tensorflow