/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art HIDDEN {
namespace gc {
namespace space {

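// Creates a bump pointer space backed by a new anonymous, low-4GB mapping of the
// requested capacity (rounded up to the page size). Returns nullptr on failure.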
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
  capacity = RoundUp(capacity, gPageSize);
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        capacity,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, std::move(mem_map));
}

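// Creates a bump pointer space that takes ownership of an existing mapping.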
BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(
          name, MemMap::Invalid(), begin, begin, limit, kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0),
      bytes_allocated_(0),
      lock_("Bump-pointer space lock"),
      main_block_size_(0) {
  // This constructor gets called only from Heap::PreZygoteFork(), which
  // doesn't require a mark_bitmap.
}

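// Takes ownership of the mapping. Allocation begins at Begin() (initially
// End() == Begin()), and a mark bitmap covering the full capacity is created.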
BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.Begin(),
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map_.End()),
      objects_allocated_(0),
      bytes_allocated_(0),
      lock_("Bump-pointer space lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0) {
  mark_bitmap_ =
      accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
                                                Begin(),
                                                Capacity());
}

void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.store(0, std::memory_order_relaxed);
  bytes_allocated_.store(0, std::memory_order_relaxed);
  {
    MutexLock mu(Thread::Current(), lock_);
    growth_end_ = Limit();
    block_sizes_.clear();
    main_block_size_ = 0;
  }
}

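// Shrinks the space's capacity to new_capacity if possible. Pages already
// allocated into are not released, so the effective (returned) capacity is at
// least the currently used size. Only supported with userfaultfd GC.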
size_t BumpPointerSpace::ClampGrowthLimit(size_t new_capacity) {
  CHECK(gUseUserfaultfd);
  MutexLock mu(Thread::Current(), lock_);
  CHECK_EQ(growth_end_, Limit());
  uint8_t* end = End();
  CHECK_LE(end, growth_end_);
  size_t free_capacity = growth_end_ - end;
  size_t clamp_size = Capacity() - new_capacity;
  if (clamp_size > free_capacity) {
    new_capacity += clamp_size - free_capacity;
  }
  SetLimit(Begin() + new_capacity);
  growth_end_ = Limit();
  GetMemMap()->SetSize(new_capacity);
  if (GetMarkBitmap()->HeapBegin() != 0) {
    GetMarkBitmap()->SetHeapSize(new_capacity);
  }
  return new_capacity;
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     << reinterpret_cast<void*>(Limit());
}

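// Merges the thread's TLAB accounting back into the space. Returns 0 since
// revoking does not free any memory.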
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

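// Records the current size as the main block's size. The main block is the
// storage allocated before any TLAB blocks exist, so this must only be called
// while block_sizes_ is empty.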
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK(block_sizes_.empty());
  main_block_size_ = Size();
}

// Returns the start of the storage, or nullptr if the space is exhausted.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  if (block_sizes_.empty()) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(AllocNonvirtualWithoutAccounting(bytes));
  if (LIKELY(storage != nullptr)) {
    block_sizes_.push_back(bytes);
  }
  return storage;
}

accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (from blocks that are no longer being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (!block_sizes_.empty()) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (from blocks that are no longer being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (!block_sizes_.empty()) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

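// Transfers the thread's TLAB object and byte counts into the space-wide
// counters and detaches the TLAB from the thread. Requires lock_ to be held.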
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
  thread->ResetTlab();
}

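// Allocates a new TLAB of the requested size (rounded up to kAlignment) for
// self, revoking the previous one first. Returns false if the space is full.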
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes, size_t* bytes_tl_bulk_allocated) {
  bytes = RoundUp(bytes, kAlignment);
  MutexLock mu(Thread::Current(), lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes, start + bytes);
  if (bytes_tl_bulk_allocated != nullptr) {
    *bytes_tl_bulk_allocated = bytes;
  }
  return true;
}

bool BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t failed_alloc_bytes) {
  size_t max_contiguous_allocation = Limit() - End();
  if (failed_alloc_bytes > max_contiguous_allocation) {
    os << "; failed due to fragmentation (largest possible contiguous allocation "
       << max_contiguous_allocation << " bytes)";
    return true;
  }
  // Caller's job to print failed_alloc_bytes.
  return false;
}

size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    *usable_size = RoundUp(num_bytes, kAlignment);
  }
  return num_bytes;
}

uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment, Heap* heap) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  DCHECK(IsAligned<kAlignment>(alignment));
  uint8_t* end = end_.load(std::memory_order_relaxed);
  uint8_t* aligned_end = AlignUp(end, alignment);
  ptrdiff_t diff = aligned_end - end;
  if (diff > 0) {
    end_.store(aligned_end, std::memory_order_relaxed);
    heap->AddBytesAllocated(diff);
    // If we have blocks after the main one, then just add the diff to the last block.
    MutexLock mu(self, lock_);
    if (!block_sizes_.empty()) {
      block_sizes_.back() += diff;
    }
  }
  return aligned_end;
}

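// Returns a copy of block_sizes_ (nullptr if empty, in which case the main
// block is updated instead) and the main block size. The caller owns the
// returned vector.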
std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
  std::vector<size_t>* block_sizes = nullptr;
  MutexLock mu(self, lock_);
  if (!block_sizes_.empty()) {
    block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
  } else {
    UpdateMainBlock();
  }
  *main_block_size = main_block_size_;
  return block_sizes;
}

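// Overwrites the block bookkeeping: sets the main block size, erases blocks
// before first_valid_idx, and moves end_ to the end of the remaining blocks.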
void BumpPointerSpace::SetBlockSizes(Thread* self,
                                     const size_t main_block_size,
                                     const size_t first_valid_idx) {
  MutexLock mu(self, lock_);
  main_block_size_ = main_block_size;
  if (!block_sizes_.empty()) {
    block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
  }
  size_t size = main_block_size;
  for (size_t block_size : block_sizes_) {
    size += block_size;
  }
  DCHECK(IsAligned<kAlignment>(size));
  end_.store(Begin() + size, std::memory_order_relaxed);
}

}  // namespace space
}  // namespace gc
}  // namespace art