• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "bump_pointer_space.h"
18 #include "bump_pointer_space-inl.h"
19 #include "mirror/class-inl.h"
20 #include "mirror/object-inl.h"
21 #include "thread_list.h"
22 
23 namespace art {
24 namespace gc {
25 namespace space {
26 
Create(const std::string & name,size_t capacity)27 BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
28   capacity = RoundUp(capacity, kPageSize);
29   std::string error_msg;
30   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
31                                         capacity,
32                                         PROT_READ | PROT_WRITE,
33                                         /*low_4gb=*/ true,
34                                         &error_msg);
35   if (!mem_map.IsValid()) {
36     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
37         << PrettySize(capacity) << " with message " << error_msg;
38     return nullptr;
39   }
40   return new BumpPointerSpace(name, std::move(mem_map));
41 }
42 
CreateFromMemMap(const std::string & name,MemMap && mem_map)43 BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
44   return new BumpPointerSpace(name, std::move(mem_map));
45 }
46 
// Constructor for a space whose memory is owned elsewhere (no backing MemMap
// of its own — the base class gets MemMap::Invalid()). The space starts
// empty: end == begin, and growth is allowed up to |limit|.
BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(name,
                                 MemMap::Invalid(),
                                 begin,
                                 begin,  // end_ starts at begin: nothing allocated yet.
                                 limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0) {
  // This constructor gets called only from Heap::PreZygoteFork(), which
  // doesn't require a mark_bitmap.
}
61 
// Constructor for a space that owns its backing MemMap. Note the use of
// mem_map.Begin()/End() after std::move(mem_map) in the init list is safe:
// std::move is only a cast, and all call-site arguments are evaluated before
// the base-class constructor (which performs the actual move) runs.
BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.Begin(),  // end_ starts at begin: empty space.
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map_.End()),  // mem_map_ is the member, already moved into the base.
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0) {
  // Unlike the PreZygoteFork constructor above, this space needs a mark
  // bitmap covering its whole capacity.
  mark_bitmap_ =
      accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
                                                Begin(),
                                                Capacity());
}
78 
// Resets the space to empty: returns the backing pages to the kernel and
// zeroes all allocation bookkeeping.
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    // On configurations where MADV_DONTNEED is not guaranteed to hand back
    // zeroed pages, zero them explicitly first.
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning, we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.store(0, std::memory_order_relaxed);
  bytes_allocated_.store(0, std::memory_order_relaxed);
  growth_end_ = Limit();
  {
    // Block bookkeeping is guarded by block_lock_.
    MutexLock mu(Thread::Current(), block_lock_);
    block_sizes_.clear();
    main_block_size_ = 0;
  }
}
97 
Dump(std::ostream & os) const98 void BumpPointerSpace::Dump(std::ostream& os) const {
99   os << GetName() << " "
100       << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
101       << reinterpret_cast<void*>(Limit());
102 }
103 
// Flushes |thread|'s thread-local buffer accounting into this space and
// detaches the TLAB, under block_lock_. Returns 0: revoking frees no bytes.
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}
109 
// Revokes the thread-local buffers of every live thread. Holds the runtime
// shutdown lock and then the thread list lock (in that order) so the thread
// list cannot change underneath the loop. Returns 0: revoking frees no bytes.
size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Not do a copy of the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}
121 
// Debug-only check that |thread| has no active TLAB. No-op in release builds.
void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}
128 
// Debug-only check that no live thread has an active TLAB. Takes the same
// shutdown-then-thread-list lock order as RevokeAllThreadLocalBuffers().
// No-op in release builds.
void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Not do a copy of the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}
141 
// Records the current size of the space as the main block's size. Only valid
// while no other blocks exist (DCHECKed); callers operate under block_lock_.
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK(block_sizes_.empty());
  main_block_size_ = Size();
}
146 
// Allocates a block of |bytes| (rounded up to kAlignment) past the main
// block and records its size. Returns the start of the storage, or nullptr
// if the space is exhausted. Callers hold block_lock_ (see AllocNewTlab).
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (block_sizes_.empty()) {
    // First non-main block: freeze the main block's size before appending.
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(AllocNonvirtualWithoutAccounting(bytes));
  if (LIKELY(storage != nullptr)) {
    block_sizes_.push_back(bytes);
  }
  return storage;
}
159 
// Sweeping is not supported for bump pointer spaces; reaching this is a
// fatal error.
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}
164 
GetBytesAllocated()165 uint64_t BumpPointerSpace::GetBytesAllocated() {
166   // Start out pre-determined amount (blocks which are not being allocated into).
167   uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
168   Thread* self = Thread::Current();
169   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
170   MutexLock mu2(self, *Locks::thread_list_lock_);
171   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
172   MutexLock mu3(Thread::Current(), block_lock_);
173   // If we don't have any blocks, we don't have any thread local buffers. This check is required
174   // since there can exist multiple bump pointer spaces which exist at the same time.
175   if (!block_sizes_.empty()) {
176     for (Thread* thread : thread_list) {
177       total += thread->GetThreadLocalBytesAllocated();
178     }
179   }
180   return total;
181 }
182 
GetObjectsAllocated()183 uint64_t BumpPointerSpace::GetObjectsAllocated() {
184   // Start out pre-determined amount (blocks which are not being allocated into).
185   uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
186   Thread* self = Thread::Current();
187   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
188   MutexLock mu2(self, *Locks::thread_list_lock_);
189   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
190   MutexLock mu3(Thread::Current(), block_lock_);
191   // If we don't have any blocks, we don't have any thread local buffers. This check is required
192   // since there can exist multiple bump pointer spaces which exist at the same time.
193   if (!block_sizes_.empty()) {
194     for (Thread* thread : thread_list) {
195       total += thread->GetThreadLocalObjectsAllocated();
196     }
197   }
198   return total;
199 }
200 
// Accumulates |thread|'s TLAB counters into the space-wide totals and then
// clears the thread's TLAB. Callers hold block_lock_ (hence "Locked").
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
  thread->ResetTlab();
}
206 
// Gives |self| a fresh thread-local allocation buffer of |bytes|, revoking
// any existing TLAB first. Returns false if the space cannot fit the block.
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  // TLAB position starts at |start|; both end and limit are start + bytes.
  self->SetTlab(start, start + bytes, start + bytes);
  return true;
}
217 
LogFragmentationAllocFailure(std::ostream & os,size_t failed_alloc_bytes)218 bool BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
219                                                     size_t failed_alloc_bytes) {
220   size_t max_contiguous_allocation = Limit() - End();
221   if (failed_alloc_bytes > max_contiguous_allocation) {
222     os << "; failed due to fragmentation (largest possible contiguous allocation "
223        <<  max_contiguous_allocation << " bytes)";
224     return true;
225   }
226   // Caller's job to print failed_alloc_bytes.
227   return false;
228 }
229 
AllocationSizeNonvirtual(mirror::Object * obj,size_t * usable_size)230 size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
231   size_t num_bytes = obj->SizeOf();
232   if (usable_size != nullptr) {
233     *usable_size = RoundUp(num_bytes, kAlignment);
234   }
235   return num_bytes;
236 }
237 
// Rounds End() up to |alignment|, charging the pad bytes to the last block
// (if any). Requires the mutator lock held exclusively, so no thread can be
// allocating concurrently (which is why relaxed atomics suffice here).
// Returns the pre-alignment end pointer.
uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  DCHECK(IsAligned<kAlignment>(alignment));
  uint8_t* end = end_.load(std::memory_order_relaxed);
  uint8_t* aligned_end = AlignUp(end, alignment);
  ptrdiff_t diff = aligned_end - end;
  if (diff > 0) {
    end_.store(aligned_end, std::memory_order_relaxed);
    // If we have blocks after the main one. Then just add the diff to the last
    // block.
    MutexLock mu(self, block_lock_);
    if (!block_sizes_.empty()) {
      block_sizes_.back() += diff;
    }
  }
  return end;
}
255 
// Returns a heap-allocated copy of the block sizes, or nullptr when only the
// main block exists; stores the main block's size in |*main_block_size|.
// The caller takes ownership of the returned vector.
std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
  std::vector<size_t>* block_sizes = nullptr;
  MutexLock mu(self, block_lock_);
  if (!block_sizes_.empty()) {
    block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
  } else {
    // No extra blocks: everything allocated so far is the main block.
    UpdateMainBlock();
  }
  *main_block_size = main_block_size_;
  return block_sizes;
}
267 
// Replaces the block bookkeeping: sets the main block's size, drops block
// entries before |first_valid_idx|, and moves End() so the space covers
// exactly the main block plus the surviving blocks.
void BumpPointerSpace::SetBlockSizes(Thread* self,
                                     const size_t main_block_size,
                                     const size_t first_valid_idx) {
  MutexLock mu(self, block_lock_);
  main_block_size_ = main_block_size;
  if (!block_sizes_.empty()) {
    block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
  }
  // Recompute the total occupied size: main block plus remaining blocks.
  size_t size = main_block_size;
  for (size_t block_size : block_sizes_) {
    size += block_size;
  }
  DCHECK(IsAligned<kAlignment>(size));
  end_.store(Begin() + size, std::memory_order_relaxed);
}
283 
284 }  // namespace space
285 }  // namespace gc
286 }  // namespace art
287