/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art HIDDEN {
namespace gc {
namespace space {

BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
  capacity = RoundUp(capacity, gPageSize);
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        capacity,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, std::move(mem_map));
}
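
// A minimal usage sketch for the factory above (illustrative only; the real
// call sites live in the heap setup code):
//
//   BumpPointerSpace* space = BumpPointerSpace::Create("bump pointer space", 64 * MB);
//   if (space == nullptr) {
//     // The mapping failed; an error has already been logged.
//   }
//
// Capacity is rounded up to whole pages because the backing anonymous
// mapping is page-granular.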

BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(
          name, MemMap::Invalid(), begin, begin, limit, kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0),
      bytes_allocated_(0),
      lock_("Bump-pointer space lock"),
      main_block_size_(0) {
  // This constructor gets called only from Heap::PreZygoteFork(), which
  // doesn't require a mark_bitmap.
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.Begin(),
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map_.End()),
      objects_allocated_(0),
      bytes_allocated_(0),
      lock_("Bump-pointer space lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0) {
  mark_bitmap_ =
      accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
                                                Begin(),
                                                Capacity());
}
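
// Address-ordered pointers maintained by this space (a sketch of the
// invariant assumed throughout this file):
//
//   Begin() <= End() <= growth_end_ <= Limit()
//
// Begin() is the start of the mapping, End() is the current bump pointer
// (everything below it has been handed out), growth_end_ caps how far End()
// may advance, and Limit() is the end of the reserved mapping.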

void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  if (GetMarkBitmap() != nullptr) {
    GetMarkBitmap()->Clear();
  }
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.store(0, std::memory_order_relaxed);
  bytes_allocated_.store(0, std::memory_order_relaxed);
  {
    MutexLock mu(Thread::Current(), lock_);
    growth_end_ = Limit();
    block_sizes_.clear();
    main_block_size_ = 0;
    black_dense_region_size_ = 0;
  }
}

size_t BumpPointerSpace::ClampGrowthLimit(size_t new_capacity) {
  CHECK(gUseUserfaultfd);
  MutexLock mu(Thread::Current(), lock_);
  CHECK_EQ(growth_end_, Limit());
  uint8_t* end = End();
  CHECK_LE(end, growth_end_);
  size_t free_capacity = growth_end_ - end;
  size_t clamp_size = Capacity() - new_capacity;
  if (clamp_size > free_capacity) {
    new_capacity += clamp_size - free_capacity;
  }
  SetLimit(Begin() + new_capacity);
  growth_end_ = Limit();
  GetMemMap()->SetSize(new_capacity);
  if (GetMarkBitmap()->HeapBegin() != 0) {
    GetMarkBitmap()->SetHeapSize(new_capacity);
  }
  return new_capacity;
}
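
// A worked example of the clamping above (illustrative numbers only): with
// Capacity() == 64 MB, End() - Begin() == 40 MB, and new_capacity == 32 MB,
// the requested clamp_size is 32 MB but only 24 MB is free, so new_capacity
// is raised to 40 MB. The space is never clamped below what has already been
// allocated.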

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     << reinterpret_cast<void*>(Limit());
}

size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void BumpPointerSpace::UpdateMainBlock() {
  DCHECK(block_sizes_.empty());
  main_block_size_ = Size();
}

// Returns the start of the storage.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  if (block_sizes_.empty()) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(AllocNonvirtualWithoutAccounting(bytes));
  if (LIKELY(storage != nullptr)) {
    block_sizes_.push_back(bytes);
  }
  return storage;
}
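
// The bump-pointer step done by AllocNonvirtualWithoutAccounting() (defined
// in bump_pointer_space-inl.h) amounts to the following sketch, ignoring the
// atomic compare-and-swap loop used by the real implementation:
//
//   uint8_t* old_end = end_;
//   if (old_end + bytes > growth_end_) {
//     return nullptr;  // Out of space.
//   }
//   end_ = old_end + bytes;  // "Bump" the end pointer forward.
//   return old_end;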

accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (!block_sizes_.empty()) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}
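
// The running total above is, schematically (a sketch):
//
//   total = bytes_allocated_  // revoked TLABs and direct (non-TLAB) allocations
//         + sum over running threads of thread->GetThreadLocalBytesAllocated()
//
// Per-thread counts are folded into bytes_allocated_ only when a TLAB is
// revoked (see RevokeThreadLocalBuffersLocked), so live TLABs are added in
// explicitly here.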

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (!block_sizes_.empty()) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
  thread->ResetTlab();
}

bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes, size_t* bytes_tl_bulk_allocated) {
  bytes = RoundUp(bytes, kAlignment);
  MutexLock mu(Thread::Current(), lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes, start + bytes);
  if (bytes_tl_bulk_allocated != nullptr) {
    *bytes_tl_bulk_allocated = bytes;
  }
  return true;
}
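
// Once the TLAB is installed above, the owning thread can serve small
// allocations without touching lock_; conceptually (a sketch, not the actual
// fast-path code):
//
//   mirror::Object* AllocInTlab(Thread* self, size_t bytes) {
//     if (self->TlabSize() >= bytes) {
//       return self->AllocTlab(bytes);  // Pure pointer bump, no locking.
//     }
//     return nullptr;  // Caller must first request a new TLAB.
//   }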

bool BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t failed_alloc_bytes) {
  size_t max_contiguous_allocation = Limit() - End();
  if (failed_alloc_bytes > max_contiguous_allocation) {
    os << "; failed due to fragmentation (largest possible contiguous allocation "
       << max_contiguous_allocation << " bytes)";
    return true;
  }
  // Caller's job to print failed_alloc_bytes.
  return false;
}

size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    *usable_size = RoundUp(num_bytes, kAlignment);
  }
  return num_bytes;
}
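
// For example, assuming kAlignment == 8: an object whose SizeOf() is 13
// bytes reports a usable size of RoundUp(13, 8) == 16 bytes, since the bump
// pointer only ever advances in kAlignment-sized multiples.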

uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment, Heap* heap) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  DCHECK(IsAligned<kAlignment>(alignment));
  uint8_t* end = end_.load(std::memory_order_relaxed);
  uint8_t* aligned_end = AlignUp(end, alignment);
  ptrdiff_t diff = aligned_end - end;
  if (diff > 0) {
    end_.store(aligned_end, std::memory_order_relaxed);
    heap->AddBytesAllocated(diff);
    // If we have blocks after the main one, just add the diff to the last block.
    MutexLock mu(self, lock_);
    if (!block_sizes_.empty()) {
      block_sizes_.back() += diff;
    }
  }
  return aligned_end;
}
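
// A quick worked example of the padding above (illustrative addresses only):
// with end == 0x1004 and alignment == 0x1000, AlignUp yields 0x2000, so
// diff == 0xffc bytes of padding are accounted as allocated and appended to
// the last block, keeping the block bookkeeping consistent with end_.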

std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
  std::vector<size_t>* block_sizes = nullptr;
  MutexLock mu(self, lock_);
  if (!block_sizes_.empty()) {
    block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
  } else {
    UpdateMainBlock();
  }
  *main_block_size = main_block_size_;
  return block_sizes;
}

void BumpPointerSpace::SetBlockSizes(Thread* self,
                                     const size_t main_block_size,
                                     const size_t first_valid_idx) {
  MutexLock mu(self, lock_);
  main_block_size_ = main_block_size;
  if (!block_sizes_.empty()) {
    block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
  }
  size_t size = main_block_size;
  for (size_t block_size : block_sizes_) {
    size += block_size;
  }
  DCHECK(IsAligned<kAlignment>(size));
  end_.store(Begin() + size, std::memory_order_relaxed);
}
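
// The store above re-establishes the bookkeeping invariant (a sketch):
//
//   End() == Begin() + main_block_size_ + sum(block_sizes_)
//
// after the first first_valid_idx block entries have been dropped.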

void BumpPointerSpace::SetBlackDenseRegionSize(size_t size) {
  DCHECK_ALIGNED_PARAM(size, gPageSize);
  MutexLock mu(Thread::Current(), lock_);
  black_dense_region_size_ = size;
}

}  // namespace space
}  // namespace gc
}  // namespace art